| code (string, 81–54k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
def kth_permutation(k, n):
    """Return the k-th (0-indexed) lexicographic permutation of range(n)."""
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n))
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
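A quick sanity check of the permutation routine above (a sketch; `kth_permutation` is the de-obfuscated name used in this rewrite, not an identifier from the original dataset row):

# Permutation index 10 of range(4) in lexicographic order is [1, 3, 0, 2]
assert kth_permutation(10, 4) == [1, 3, 0, 2]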
| 36
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Any =jax.device_count()
SCREAMING_SNAKE_CASE_: Dict =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Dict =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int ="""stabilityai/stable-diffusion-2"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloat16 , )
SCREAMING_SNAKE_CASE_: Optional[int] =scheduler_params
SCREAMING_SNAKE_CASE_: Tuple ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.device_count()
SCREAMING_SNAKE_CASE_: Optional[Any] =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Any =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: str =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Any =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 36
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class a ( UpperCAmelCase__ ):
UpperCamelCase : int = ['pixel_values']
def __init__( self : Optional[Any] , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = True , lowerCAmelCase : Union[int, float] = 1 / 255 , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : bool = True , **lowerCAmelCase : Dict , ) -> None:
'''simple docstring'''
super().__init__(**lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =size if size is not None else {"""shortest_edge""": 224}
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
SCREAMING_SNAKE_CASE_: str =get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase , param_name="""crop_size""" )
SCREAMING_SNAKE_CASE_: Dict =do_resize
SCREAMING_SNAKE_CASE_: List[Any] =size
SCREAMING_SNAKE_CASE_: Dict =resample
SCREAMING_SNAKE_CASE_: str =do_center_crop
SCREAMING_SNAKE_CASE_: List[str] =crop_size
SCREAMING_SNAKE_CASE_: Any =do_rescale
SCREAMING_SNAKE_CASE_: Union[str, Any] =rescale_factor
SCREAMING_SNAKE_CASE_: Union[str, Any] =do_normalize
SCREAMING_SNAKE_CASE_: Tuple =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE_: Optional[int] =image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE_: List[str] =do_convert_rgb
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : str , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE_: Any =get_resize_output_image_size(lowerCAmelCase , size=size["""shortest_edge"""] , default_to_square=lowerCAmelCase )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : int , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Tuple , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[int, float] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Optional[Any] , ) -> Any:
'''simple docstring'''
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : int , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : str , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : ImageInput , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = None , lowerCAmelCase : bool = None , lowerCAmelCase : int = None , lowerCAmelCase : bool = None , lowerCAmelCase : float = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase : List[str] , ) -> PIL.Image.Image:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_: Tuple =size if size is not None else self.size
SCREAMING_SNAKE_CASE_: List[str] =get_size_dict(lowerCAmelCase , param_name="""size""" , default_to_square=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_: str =do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_: Union[str, Any] =crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_size_dict(lowerCAmelCase , param_name="""crop_size""" , default_to_square=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_: Dict =rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_: int =do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_: Optional[Any] =image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_: str =image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_: Union[str, Any] =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE_: Optional[Any] =make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE_: int =[convert_to_rgb(lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_: List[Any] =[to_numpy_array(lowerCAmelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_: Any =[self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_: List[str] =[self.center_crop(image=lowerCAmelCase , size=lowerCAmelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_: Optional[int] =[self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_: str =[self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_: Optional[int] =[to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_: str ={"""pixel_values""": images}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
| 36
|
"""simple docstring"""
def solution(n = 2_000_000):
    """Return the sum of all primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 1
|
"""simple docstring"""
def mean_absolute_deviation(nums):
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
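A short usage example for the function above (a sketch; `mean_absolute_deviation` is the de-obfuscated name used in this rewrite):

# The values deviate from their mean of 2 by 1, 0 and 1, so the MAD is 2/3
assert abs(mean_absolute_deviation([1, 2, 3]) - 2 / 3) < 1e-9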
| 36
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 36
| 1
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE_: List[str] =AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =-1
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: Optional[Any] =TextStreamer(lowerCAmelCase )
model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase , streamer=lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE_: Optional[int] =cs.out[:-1]
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE_: Dict =AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =-1
SCREAMING_SNAKE_CASE_: str =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =tokenizer.decode(greedy_ids[0] )
SCREAMING_SNAKE_CASE_: Optional[Any] =TextIteratorStreamer(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] ={"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
SCREAMING_SNAKE_CASE_: Dict =Thread(target=model.generate , kwargs=lowerCAmelCase )
thread.start()
SCREAMING_SNAKE_CASE_: Dict =""""""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE_: List[Any] =AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =-1
SCREAMING_SNAKE_CASE_: str =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =greedy_ids[:, input_ids.shape[1] :]
SCREAMING_SNAKE_CASE_: Any =tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: Tuple =TextStreamer(lowerCAmelCase , skip_prompt=lowerCAmelCase )
model.generate(lowerCAmelCase , max_new_tokens=10 , do_sample=lowerCAmelCase , streamer=lowerCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
SCREAMING_SNAKE_CASE_: Any =cs.out[:-1]
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =AutoTokenizer.from_pretrained("""distilgpt2""" )
SCREAMING_SNAKE_CASE_: int =AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =-1
SCREAMING_SNAKE_CASE_: Optional[int] =torch.ones((1, 5) , device=lowerCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
SCREAMING_SNAKE_CASE_: Optional[int] =TextStreamer(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
model.generate(lowerCAmelCase , max_new_tokens=1 , do_sample=lowerCAmelCase , streamer=lowerCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
SCREAMING_SNAKE_CASE_: int =cs.out[:-1] # Remove the final "\n"
SCREAMING_SNAKE_CASE_: List[str] =tokenizer(lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def lowerCamelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE_: str =AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =-1
SCREAMING_SNAKE_CASE_: List[Any] =ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =TextIteratorStreamer(lowerCAmelCase , timeout=0.0_0_1 )
SCREAMING_SNAKE_CASE_: Optional[Any] ={"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
SCREAMING_SNAKE_CASE_: List[str] =Thread(target=model.generate , kwargs=lowerCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =""""""
for new_text in streamer:
streamer_text += new_text
| 36
|
"""simple docstring"""
def and_gate(input_1, input_2):
    # The output is 1 only when neither input is 0
    return int((input_1, input_2).count(0) == 0)


def test_and_gate():
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 36
| 1
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def create_inputs(input_types):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs


def output_types(outputs):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class a :
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] =self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
SCREAMING_SNAKE_CASE_: Any =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: List[Any] =self.tool(*lowerCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
SCREAMING_SNAKE_CASE_: str =[outputs]
self.assertListEqual(output_types(lowerCAmelCase ) , self.tool.outputs )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Tuple =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase , self.tool.outputs ):
SCREAMING_SNAKE_CASE_: int =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for _input, input_type in zip(lowerCAmelCase , self.tool.inputs ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
SCREAMING_SNAKE_CASE_: Dict =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
| 36
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def __magic_name__ ( lowercase , lowercase , lowercase ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""input_conv.weight_g"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[str] =checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: Union[str, Any] =checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[Any] =checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""output_conv.1.weight_g"""]
SCREAMING_SNAKE_CASE_: List[str] =checkpoint["""output_conv.1.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE_: List[Any] =SpeechTaHifiGanConfig.from_pretrained(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] =SpeechTaHifiGan(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch.load(lowercase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =np.load(lowercase )
SCREAMING_SNAKE_CASE_: Any =stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_: str =stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
model.save_pretrained(lowercase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 36
| 1
|
"""simple docstring"""
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
_UpperCAmelCase = logging.get_logger(__name__)
class a :
def __init__( self : Any , lowerCAmelCase : str = None , lowerCAmelCase : uuid.UUID = None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Any=None ) -> Optional[Any]:
'''simple docstring'''
if not conversation_id:
SCREAMING_SNAKE_CASE_: Optional[Any] =uuid.uuid4()
if past_user_inputs is None:
SCREAMING_SNAKE_CASE_: Tuple =[]
if generated_responses is None:
SCREAMING_SNAKE_CASE_: Any =[]
SCREAMING_SNAKE_CASE_: uuid.UUID =conversation_id
SCREAMING_SNAKE_CASE_: List[str] =past_user_inputs
SCREAMING_SNAKE_CASE_: List[str] =generated_responses
SCREAMING_SNAKE_CASE_: Optional[str] =text
def __eq__( self : Optional[int] , lowerCAmelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : bool = False ) -> Union[str, Any]:
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
f'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '''
f'''with: "{text}".''' )
SCREAMING_SNAKE_CASE_: Optional[int] =text
else:
logger.warning(
f'''User input added while unprocessed input was existing: "{self.new_user_input}" new input '''
f'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' )
else:
SCREAMING_SNAKE_CASE_: int =text
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
SCREAMING_SNAKE_CASE_: Optional[int] =None
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : str ) -> Any:
'''simple docstring'''
self.generated_responses.append(lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> Optional[int]:
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =f'''Conversation id: {self.uuid} \n'''
for is_user, text in self.iter_texts():
SCREAMING_SNAKE_CASE_: List[Any] ="""user""" if is_user else """bot"""
output += f'''{name} >> {text} \n'''
return output
@add_end_docstrings(
UpperCAmelCase__ , R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , )
class a ( UpperCAmelCase__ ):
def __init__( self : Tuple , *lowerCAmelCase : str , **lowerCAmelCase : str ) -> Any:
'''simple docstring'''
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
if self.tokenizer.pad_token_id is None:
SCREAMING_SNAKE_CASE_: Tuple =self.tokenizer.eos_token
def lowerCamelCase__ ( self : int , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple ={}
SCREAMING_SNAKE_CASE_: str ={}
SCREAMING_SNAKE_CASE_: str ={}
if min_length_for_response is not None:
SCREAMING_SNAKE_CASE_: List[Any] =min_length_for_response
if minimum_tokens is not None:
SCREAMING_SNAKE_CASE_: str =minimum_tokens
if "max_length" in generate_kwargs:
SCREAMING_SNAKE_CASE_: Optional[Any] =generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
SCREAMING_SNAKE_CASE_: Dict =clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowerCAmelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Optional[int] , lowerCAmelCase : Union[Conversation, List[Conversation]] , lowerCAmelCase : Any=0 , **lowerCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =super().__call__(lowerCAmelCase , num_workers=lowerCAmelCase , **lowerCAmelCase )
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) == 1:
return outputs[0]
return outputs
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Conversation , lowerCAmelCase : List[str]=32 ) -> Dict[str, Any]:
'''simple docstring'''
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
f'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
SCREAMING_SNAKE_CASE_: List[Any] =self.tokenizer._build_conversation_input_ids(lowerCAmelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
SCREAMING_SNAKE_CASE_: List[Any] =self._legacy_parse_and_tokenize(lowerCAmelCase )
if self.framework == "pt":
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.LongTensor([input_ids] )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE_: str =tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : str=10 , **lowerCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =generate_kwargs.get("""max_length""" , self.model.config.max_length )
SCREAMING_SNAKE_CASE_: str =model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f'''Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
SCREAMING_SNAKE_CASE_: Tuple =max_length - minimum_tokens
SCREAMING_SNAKE_CASE_: int =model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
SCREAMING_SNAKE_CASE_: Optional[Any] =model_inputs["""attention_mask"""][:, -trim:]
SCREAMING_SNAKE_CASE_: Union[str, Any] =model_inputs.pop("""conversation""" )
SCREAMING_SNAKE_CASE_: List[Any] =max_length
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.model.generate(**lowerCAmelCase , **lowerCAmelCase )
if self.model.config.is_encoder_decoder:
SCREAMING_SNAKE_CASE_: List[str] =1
else:
SCREAMING_SNAKE_CASE_: Tuple =n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any]=True ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =model_outputs["""output_ids"""]
SCREAMING_SNAKE_CASE_: List[str] =self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: Optional[Any] =model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(lowerCAmelCase )
return conversation
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Conversation ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self.tokenizer.eos_token_id
SCREAMING_SNAKE_CASE_: List[Any] =[]
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) )
if len(lowerCAmelCase ) > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE_: List[Any] =input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 36
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __magic_name__ ( lowercase ):
if "cls_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def __magic_name__ ( lowercase , lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Optional[int] =orig_state_dict.pop(lowercase )
if "qkv" in key:
SCREAMING_SNAKE_CASE_: Dict =key.split(""".""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE_: int =config.decoder_hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] ="""decoder.decoder_layers."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Dict =val[:dim, :]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: str =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: List[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: List[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Any =config.hidden_size
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""vit.encoder.layer."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim, :]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: Dict =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Any =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Tuple =val
return orig_state_dict
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: List[Any] =1024
SCREAMING_SNAKE_CASE_: Dict =4096
SCREAMING_SNAKE_CASE_: Tuple =24
SCREAMING_SNAKE_CASE_: int =16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Union[str, Any] =14
SCREAMING_SNAKE_CASE_: Any =1280
SCREAMING_SNAKE_CASE_: Dict =5120
SCREAMING_SNAKE_CASE_: Optional[int] =32
SCREAMING_SNAKE_CASE_: Optional[Any] =16
SCREAMING_SNAKE_CASE_: Tuple =ViTMAEForPreTraining(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""model"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: str =convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple ="""https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
SCREAMING_SNAKE_CASE_: List[Any] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
SCREAMING_SNAKE_CASE_: int =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: int =image_processor(images=lowercase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Dict =torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
SCREAMING_SNAKE_CASE_: Any =torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 36
| 1
|
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def stock_price(symbol = "AAPL"):
    url = f'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}'''
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 36
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
| 1
|
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a :
def __init__( self : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple=99 , lowerCAmelCase : List[str]=13 , lowerCAmelCase : str=7 , lowerCAmelCase : str=9 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Optional[Any]=32 , lowerCAmelCase : List[Any]=5 , lowerCAmelCase : int=4 , lowerCAmelCase : str=37 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : Optional[Any]=0.0_0_2 , lowerCAmelCase : Optional[int]=1 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=0 , lowerCAmelCase : Dict=None , lowerCAmelCase : Dict=None , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =parent
SCREAMING_SNAKE_CASE_: Any =batch_size
SCREAMING_SNAKE_CASE_: Tuple =encoder_seq_length
SCREAMING_SNAKE_CASE_: str =decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.decoder_seq_length
SCREAMING_SNAKE_CASE_: List[Any] =is_training
SCREAMING_SNAKE_CASE_: Tuple =use_attention_mask
SCREAMING_SNAKE_CASE_: List[str] =use_labels
SCREAMING_SNAKE_CASE_: Tuple =vocab_size
SCREAMING_SNAKE_CASE_: Tuple =hidden_size
SCREAMING_SNAKE_CASE_: List[Any] =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_attention_heads
SCREAMING_SNAKE_CASE_: List[Any] =d_ff
SCREAMING_SNAKE_CASE_: Optional[int] =relative_attention_num_buckets
SCREAMING_SNAKE_CASE_: int =dropout_rate
SCREAMING_SNAKE_CASE_: Union[str, Any] =initializer_factor
SCREAMING_SNAKE_CASE_: List[str] =eos_token_id
SCREAMING_SNAKE_CASE_: Tuple =pad_token_id
SCREAMING_SNAKE_CASE_: List[Any] =decoder_start_token_id
SCREAMING_SNAKE_CASE_: Optional[int] =None
SCREAMING_SNAKE_CASE_: List[str] =decoder_layers
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return TaConfig.from_pretrained("""google/umt5-base""" )
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : int=None , lowerCAmelCase : str=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[Any]=None , ) -> Any:
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE_: Optional[Any] =input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE_: Dict =decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
SCREAMING_SNAKE_CASE_: Optional[int] =torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCAmelCase )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE_: str =torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCAmelCase )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE_: Tuple =torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCAmelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_: Any =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
SCREAMING_SNAKE_CASE_: Optional[Any] =input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE_: str =decoder_input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE_: Any =self.get_config()
SCREAMING_SNAKE_CASE_: Dict =config.num_attention_heads
SCREAMING_SNAKE_CASE_: Optional[Any] =self.prepare_inputs_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return config, input_dict
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCamelCase__ ( self : int , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =UMTaModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: List[str] =model(
input_ids=lowerCAmelCase , decoder_input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(input_ids=lowerCAmelCase , decoder_input_ids=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =result.last_hidden_state
SCREAMING_SNAKE_CASE_: Any =result.past_key_values
SCREAMING_SNAKE_CASE_: Dict =result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCAmelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =UMTaModel(config=lowerCAmelCase ).get_decoder().to(lowerCAmelCase ).eval()
# first forward pass
SCREAMING_SNAKE_CASE_: Optional[Any] =model(lowerCAmelCase , use_cache=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =model(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =model(lowerCAmelCase , use_cache=lowerCAmelCase )
self.parent.assertTrue(len(lowerCAmelCase ) == len(lowerCAmelCase ) )
self.parent.assertTrue(len(lowerCAmelCase ) == len(lowerCAmelCase ) + 1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
SCREAMING_SNAKE_CASE_: int =ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE_: Any =model(lowerCAmelCase )["""last_hidden_state"""]
SCREAMING_SNAKE_CASE_: List[Any] =model(lowerCAmelCase , past_key_values=lowerCAmelCase )["""last_hidden_state"""]
# select random slice
SCREAMING_SNAKE_CASE_: Tuple =ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_: int =output_from_no_past[:, -1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_: str =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
def lowerCamelCase__ ( self : int , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =UMTaModel(config=lowerCAmelCase ).to(lowerCAmelCase ).half().eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(**lowerCAmelCase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(lowerCAmelCase ).any().item() )
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
UpperCamelCase : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
UpperCamelCase : Dict = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
UpperCamelCase : Optional[int] = True
UpperCamelCase : str = False
UpperCamelCase : List[str] = False
UpperCamelCase : Optional[Any] = True
UpperCamelCase : str = True
# The small UMT5 model needs higher percentages for CPU/MP tests
UpperCamelCase : int = [0.8, 0.9]
def lowerCamelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_: Dict =UMTaModel(config_and_inputs[0] ).to(lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=lowerCAmelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def lowerCamelCase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCAmelCase )
def lowerCamelCase__ ( self : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_: List[str] =config_and_inputs[0]
SCREAMING_SNAKE_CASE_: Union[str, Any] =UMTaForConditionalGeneration(lowerCAmelCase ).eval()
model.to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int ={
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=lowerCAmelCase ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCAmelCase ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCAmelCase ),
}
for attn_name, (name, mask) in zip(lowerCAmelCase , head_masking.items() ):
SCREAMING_SNAKE_CASE_: Union[str, Any] ={name: mask}
            # Explicitly pass decoder_head_mask, as it is required by the T5 model when head_mask is specified
if name == "head_mask":
SCREAMING_SNAKE_CASE_: Dict =torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=lowerCAmelCase , return_dict_in_generate=lowerCAmelCase , **lowerCAmelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
SCREAMING_SNAKE_CASE_: Tuple =out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=lowerCAmelCase ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=lowerCAmelCase , legacy=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =[
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
SCREAMING_SNAKE_CASE_: int =tokenizer(lowerCAmelCase , return_tensors="""pt""" , padding=lowerCAmelCase ).input_ids
# fmt: off
SCREAMING_SNAKE_CASE_: Any =torch.tensor(
[
                [ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333, 6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =model.generate(input_ids.to(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Dict =[
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer.batch_decode(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
| 36
|
"""simple docstring"""
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =False
while is_sorted is False: # Until all the indices are traversed keep looping
SCREAMING_SNAKE_CASE_: Tuple =True
for i in range(0 , len(lowercase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_: Tuple =False
for i in range(1 , len(lowercase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_: str =False
return input_list
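# Illustrative check (the helper is invoked as `odd_even_sort` in the __main__ block below, so that
# name is assumed here): odd_even_sort([5, 3, 1, 4]) should yield [1, 3, 4, 5] -- the alternating
# even-index and odd-index passes keep swapping adjacent out-of-order pairs until a full pass makes no swap.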
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
| 36
| 1
|
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
_UpperCAmelCase = logging.get_logger(__name__)
def __magic_name__ ( ):
# Get the sagemaker specific mp parameters from smp_options variable.
SCREAMING_SNAKE_CASE_: List[str] =os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
SCREAMING_SNAKE_CASE_: List[Any] =json.loads(lowercase )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
SCREAMING_SNAKE_CASE_: str =os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
SCREAMING_SNAKE_CASE_: Union[str, Any] =json.loads(lowercase )
if not mpi_options.get("""sagemaker_mpi_enabled""" , lowercase ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
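# Illustration with hypothetical values: SM_HP_MP_PARAMETERS='{"partitions": 2, "microbatches": 4}' and
# SM_FRAMEWORK_PARAMS='{"sagemaker_mpi_enabled": true}' would make the check above return True, provided
# the `smdistributed` package is importable; malformed JSON or a missing "partitions" key returns False instead.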
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class a ( UpperCAmelCase__ ):
UpperCamelCase : str = field(
default='' , metadata={'help': 'Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'} , )
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , lowerCAmelCase , )
@cached_property
def lowerCamelCase__ ( self : str ) -> "torch.device":
'''simple docstring'''
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
SCREAMING_SNAKE_CASE_: str =torch.device("""cpu""" )
SCREAMING_SNAKE_CASE_: List[str] =0
elif is_sagemaker_model_parallel_available():
SCREAMING_SNAKE_CASE_: Optional[int] =smp.local_rank()
SCREAMING_SNAKE_CASE_: Tuple =torch.device("""cuda""" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
SCREAMING_SNAKE_CASE_: str =int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.device("""cuda""" , self.local_rank )
SCREAMING_SNAKE_CASE_: Union[str, Any] =1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
SCREAMING_SNAKE_CASE_: Any =torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
SCREAMING_SNAKE_CASE_: Dict =torch.device("""cuda""" , self.local_rank )
SCREAMING_SNAKE_CASE_: Tuple =1
if device.type == "cuda":
torch.cuda.set_device(lowerCAmelCase )
return device
@property
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return not is_sagemaker_model_parallel_available()
@property
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
return False
| 36
|
"""simple docstring"""
def __magic_name__ ( lowercase ):
return str(lowercase ) == str(lowercase )[::-1]
def __magic_name__ ( lowercase ):
return int(lowercase ) + int(str(lowercase )[::-1] )
def __magic_name__ ( lowercase = 1_0000 ):
SCREAMING_SNAKE_CASE_: List[str] =[]
for num in range(1 , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =0
SCREAMING_SNAKE_CASE_: int =num
while iterations < 50:
SCREAMING_SNAKE_CASE_: Optional[Any] =sum_reverse(lowercase )
iterations += 1
if is_palindrome(lowercase ):
break
else:
lychrel_nums.append(lowercase )
return len(lowercase )
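# Hedged sanity check: with the default limit of 10_000 and the 50-iteration cap above, the count of
# Lychrel candidates is expected to be 249 (the published answer to Project Euler 55). This figure is
# quoted as an assumption for cross-checking, not a value verified against this exact snippet.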
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 1
|
"""simple docstring"""
def __magic_name__ ( lowercase = 3 , lowercase = 7 , lowercase = 100_0000 ):
SCREAMING_SNAKE_CASE_: Optional[int] =0
SCREAMING_SNAKE_CASE_: Optional[Any] =1
for current_denominator in range(1 , limit + 1 ):
SCREAMING_SNAKE_CASE_: str =current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
SCREAMING_SNAKE_CASE_: str =current_numerator
SCREAMING_SNAKE_CASE_: List[Any] =current_denominator
return max_numerator
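# Hedged sanity check: for numerator=3, denominator=7 and limit=1_000_000 the numerator of the fraction
# immediately to the left of 3/7 is expected to be 428570 (Project Euler 71). Quoted as an assumption
# for cross-checking rather than a value verified against this exact snippet.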
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
| 36
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""DPTFeatureExtractor"""]
_UpperCAmelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
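# Illustrative usage (assuming this file is the models/dpt/__init__.py of a transformers checkout with
# torch and vision extras installed): `from transformers import DPTForDepthEstimation, DPTImageProcessor`
# resolves through the _LazyModule above, importing the heavy submodules only on first attribute access.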
| 36
| 1
|
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
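# Illustrative usage (assuming this is the diffusers `schedulers` package with torch installed):
# `from diffusers import DDIMScheduler` or `DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)`
# pass through the optional-dependency guards above; missing backends fall back to the dummy objects instead.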
| 36
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class a :
def __init__( self : str ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: list[Any] =[]
SCREAMING_SNAKE_CASE_: int =0
SCREAMING_SNAKE_CASE_: int =0
def lowerCamelCase__ ( self : Optional[Any] ) -> bool:
'''simple docstring'''
return self.head == self.tail
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
self.data.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =self.tail + 1
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.data[self.head]
SCREAMING_SNAKE_CASE_: Optional[int] =self.head + 1
return ret
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.tail - self.head
def lowerCamelCase__ ( self : str ) -> None:
'''simple docstring'''
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =data
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: int =1
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return self.data
def lowerCamelCase__ ( self : List[Any] ) -> MyNode | None:
'''simple docstring'''
return self.left
def lowerCamelCase__ ( self : Dict ) -> MyNode | None:
'''simple docstring'''
return self.right
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
return self.height
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =data
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =node
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =node
def lowerCamelCase__ ( self : int , lowerCAmelCase : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =height
def __magic_name__ ( lowercase ):
if node is None:
return 0
return node.get_height()
def __magic_name__ ( lowercase , lowercase ):
if a > b:
return a
return b
def __magic_name__ ( lowercase ):
print("""left rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: int =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
print("""right rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowercase ) )
return right_rotation(lowercase )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowercase ) )
return left_rotation(lowercase )
def __magic_name__ ( lowercase , lowercase ):
if node is None:
return MyNode(lowercase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowercase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
SCREAMING_SNAKE_CASE_: Union[str, Any] =node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
SCREAMING_SNAKE_CASE_: Any =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: List[Any] =lr_rotation(lowercase )
else:
node.set_right(insert_node(node.get_right() , lowercase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
SCREAMING_SNAKE_CASE_: Tuple =node.get_right()
assert right_child is not None
if data < right_child.get_data():
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =left_rotation(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
return node
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: Dict =root.get_right()
if right_child is None:
break
SCREAMING_SNAKE_CASE_: str =right_child
return root.get_data()
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: str =root.get_left()
if left_child is None:
break
SCREAMING_SNAKE_CASE_: Dict =left_child
return root.get_data()
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str =root.get_left()
SCREAMING_SNAKE_CASE_: List[Any] =root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_left_most(lowercase )
root.set_data(lowercase )
root.set_right(del_node(lowercase , lowercase ) )
elif left_child is not None:
SCREAMING_SNAKE_CASE_: Optional[int] =left_child
elif right_child is not None:
SCREAMING_SNAKE_CASE_: Any =right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(lowercase , lowercase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowercase , lowercase ) )
if get_height(lowercase ) - get_height(lowercase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
SCREAMING_SNAKE_CASE_: Tuple =left_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
elif get_height(lowercase ) - get_height(lowercase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
SCREAMING_SNAKE_CASE_: Optional[Any] =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: str =lr_rotation(lowercase )
SCREAMING_SNAKE_CASE_: str =my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowercase )
return root
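# Summary of the routine above: it walks down to the matching key, splices the node out (promoting the
# left-most key of the right subtree when both children exist), and rebalances with the rotation helpers
# on the way back up so the AVL height invariant is restored.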
class a :
def __init__( self : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: MyNode | None =None
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
return get_height(self.root )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""insert:""" + str(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Tuple =insert_node(self.root , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""delete:""" + str(lowerCAmelCase ) )
if self.root is None:
print("""Tree is empty!""" )
return
SCREAMING_SNAKE_CASE_: Union[str, Any] =del_node(self.root , lowerCAmelCase )
    def __str__( self : List[str] , ) -> str: # a level-order traversal gives a more intuitive look at the tree
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =""""""
SCREAMING_SNAKE_CASE_: str =MyQueue()
q.push(self.root )
SCREAMING_SNAKE_CASE_: List[str] =self.get_height()
if layer == 0:
return output
SCREAMING_SNAKE_CASE_: int =0
while not q.is_empty():
SCREAMING_SNAKE_CASE_: int =q.pop()
SCREAMING_SNAKE_CASE_: List[Any] =""" """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(lowerCAmelCase )
q.push(lowerCAmelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
SCREAMING_SNAKE_CASE_: List[Any] =cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , lowerCAmelCase ) - 1:
SCREAMING_SNAKE_CASE_: int =layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __magic_name__ ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
_UpperCAmelCase = AVLtree()
_UpperCAmelCase = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 36
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class a :
UpperCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
UpperCamelCase : Optional[str] = field(
default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
UpperCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
UpperCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
UpperCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
UpperCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
UpperCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
UpperCamelCase : Optional[int] = field(
default=1_0_0_0_0 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    UpperCamelCase : Optional[float] = field(default=2E-4 , metadata={'help': 'Learning rate for training.'} )
    UpperCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate scheduler type.'} )
UpperCamelCase : Optional[int] = field(
default=7_5_0 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
UpperCamelCase : Optional[int] = field(
default=1_6 , metadata={'help': 'Number of gradient accumulation steps.'} )
UpperCamelCase : Optional[bool] = field(
default=UpperCAmelCase__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
UpperCamelCase : Optional[int] = field(default=5_0_0_0_0 , metadata={'help': 'Maximum number of training steps.'} )
UpperCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
UpperCamelCase : Optional[int] = field(default=1_0_2_4 , metadata={'help': 'Sequence lengths used for training.'} )
UpperCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
UpperCamelCase : Optional[int] = field(
default=1_0_2_4 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
UpperCamelCase : Optional[bool] = field(default=UpperCAmelCase__ , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class a :
UpperCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
UpperCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
UpperCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
UpperCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
UpperCamelCase : Optional[int] = field(default=1_0_2_4 , metadata={'help': 'Length of sequences to be evaluated.'} )
UpperCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class a :
UpperCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
UpperCamelCase : Optional[int] = field(default=UpperCAmelCase__ , metadata={'help': 'Number of workers used for code evaluation.'} )
UpperCamelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
UpperCamelCase : Optional[bool] = field(
default=UpperCAmelCase__ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
UpperCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
UpperCamelCase : Optional[int] = field(default=2_5_6 , metadata={'help': 'Maximum number of newly generated tokens.'} )
UpperCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
UpperCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
UpperCamelCase : Optional[int] = field(default=1_0 , metadata={'help': 'Number of generations to run in parallel.'} )
UpperCamelCase : Optional[int] = field(
default=2_0_0 , metadata={'help': 'Number of completions to generate for each sample.'} )
UpperCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
UpperCamelCase : Optional[str] = field(
        default='eval_results.json' , metadata={'help': 'Name of the file the evaluation results are saved to.'} )
UpperCamelCase : Optional[str] = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
UpperCamelCase : Optional[int] = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class a :
UpperCamelCase : Optional[int] = field(
default=UpperCAmelCase__ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
UpperCamelCase : Optional[str] = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
UpperCamelCase : Optional[str] = field(
        default='codeparrot-clean' , metadata={'help': 'Folder to save the processed dataset.'} )
UpperCamelCase : Optional[int] = field(
default=1_0_0_0_0_0 , metadata={'help': 'Number of files to save per JSON output file.'} )
UpperCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
UpperCamelCase : Optional[float] = field(
default=1_0_0_0 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
UpperCamelCase : Optional[float] = field(
default=1_0_0 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
UpperCamelCase : Optional[float] = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
UpperCamelCase : Optional[float] = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
UpperCamelCase : Optional[float] = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
UpperCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
UpperCamelCase : Optional[bool] = field(
default=UpperCAmelCase__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
UpperCamelCase : Optional[float] = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class a :
UpperCamelCase : Optional[str] = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
UpperCamelCase : Optional[str] = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
UpperCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
UpperCamelCase : Optional[int] = field(default=2_0_0_0_0_0 , metadata={'help': 'Number of examples to train tokenizer on.'} )
UpperCamelCase : Optional[int] = field(
        default=3_2_7_6_8 , metadata={'help': 'Vocabulary size of the new tokenizer.'} )
UpperCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
UpperCamelCase : Optional[bool] = field(default=UpperCAmelCase__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class a :
UpperCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
UpperCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
UpperCamelCase : Optional[str] = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
    UpperCamelCase : Optional[int] = field(default=UpperCAmelCase__ , metadata={'help': 'Number of workers used to pretokenize the data.'} )
@dataclass
class a :
UpperCamelCase : Optional[str] = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
UpperCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
UpperCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
    UpperCamelCase : Optional[bool] = field(default=UpperCAmelCase__ , metadata={'help': 'Push saved model to the hub.'} )
| 36
|
"""simple docstring"""
import string
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =""""""
for i in sequence:
SCREAMING_SNAKE_CASE_: List[Any] =ord(lowercase )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =string.ascii_letters
SCREAMING_SNAKE_CASE_: Tuple =string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowercase )] if c in letters else c for c in sequence )
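# Quick illustrative check (the two helpers are referenced as `atbash_slow` and `atbash` in the benchmark
# below, so those names are assumed): atbash("ABC") -> "ZYX" and atbash("abc xyz") -> "zyx cba", since the
# cipher simply mirrors each letter within its own case and leaves everything else untouched.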
def __magic_name__ ( ):
from timeit import timeit
print("""Running performance benchmarks...""" )
SCREAMING_SNAKE_CASE_: int ="""from string import printable ; from __main__ import atbash, atbash_slow"""
print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowercase )} seconds''' )
print(f'''> atbash(): {timeit("atbash(printable)" , setup=lowercase )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 36
| 1
|
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_UpperCAmelCase = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = """cpu"""
_UpperCAmelCase = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
_UpperCAmelCase = """path-to-your-trained-model"""
_UpperCAmelCase = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_UpperCAmelCase = pipe.to(device)
# to channels last
_UpperCAmelCase = pipe.unet.to(memory_format=torch.channels_last)
_UpperCAmelCase = pipe.vae.to(memory_format=torch.channels_last)
_UpperCAmelCase = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_UpperCAmelCase = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_UpperCAmelCase = torch.randn(2, 4, 6_4, 6_4)
_UpperCAmelCase = torch.rand(1) * 9_9_9
_UpperCAmelCase = torch.randn(2, 7_7, 7_6_8)
_UpperCAmelCase = (sample, timestep, encoder_hidden_status)
try:
    _UpperCAmelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    _UpperCAmelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
_UpperCAmelCase = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
_UpperCAmelCase = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    _UpperCAmelCase = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
_UpperCAmelCase = 6_6_6
_UpperCAmelCase = torch.Generator(device).manual_seed(seed)
_UpperCAmelCase = {"""generator""": generator}
if args.steps is not None:
_UpperCAmelCase = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
_UpperCAmelCase = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
| 36
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=3 , lowerCAmelCase : Optional[Any]=64 , lowerCAmelCase : Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =np.random.default_rng(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =length
        SCREAMING_SNAKE_CASE_: Union[str, Any] =rng.normal(size=(length,) ).astype(np.float32 )
        SCREAMING_SNAKE_CASE_: Tuple =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )
def __len__( self : List[Any] ) -> str:
'''simple docstring'''
return self.length
def __getitem__( self : Union[str, Any] , lowerCAmelCase : Any ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : str=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Optional[int]=False ) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: Dict =True
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tuple=None ) -> int:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
def __init__( self : Union[str, Any] , lowerCAmelCase : Any=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : List[Any]=False ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[str] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: List[Any] =True
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : int=None ) -> Any:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Optional[int] =False
return x * self.a + self.b
def __magic_name__ ( lowercase , lowercase = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE_: Optional[int] ={"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
SCREAMING_SNAKE_CASE_: Any =load_dataset("""csv""" , data_files=lowercase )
SCREAMING_SNAKE_CASE_: Any =datasets["""train"""].unique("""label""" )
SCREAMING_SNAKE_CASE_: List[Any] ={v: i for i, v in enumerate(lowercase )}
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Dict =tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase , padding="""max_length""" )
if "label" in examples:
SCREAMING_SNAKE_CASE_: Optional[int] =[label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_: List[Any] =datasets.map(
lowercase , batched=lowercase , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(tokenized_datasets["""train"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=2 )
SCREAMING_SNAKE_CASE_: Dict =DataLoader(tokenized_datasets["""validation"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=1 )
return train_dataloader, eval_dataloader
| 36
| 1
|
"""simple docstring"""
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : str = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
UpperCamelCase : Optional[Any] = 'nezha'
def __init__( self : Dict , lowerCAmelCase : List[str]=2_1128 , lowerCAmelCase : Any=768 , lowerCAmelCase : Optional[Any]=12 , lowerCAmelCase : List[str]=12 , lowerCAmelCase : Optional[Any]=3072 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Any=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Tuple=512 , lowerCAmelCase : Any=64 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : Union[str, Any]=0.0_2 , lowerCAmelCase : List[Any]=1E-12 , lowerCAmelCase : int=0.1 , lowerCAmelCase : int=0 , lowerCAmelCase : Any=2 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : int=True , **lowerCAmelCase : Dict , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_attention_heads
SCREAMING_SNAKE_CASE_: Any =hidden_act
SCREAMING_SNAKE_CASE_: Optional[int] =intermediate_size
SCREAMING_SNAKE_CASE_: int =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Any =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int =max_position_embeddings
SCREAMING_SNAKE_CASE_: Optional[int] =max_relative_position
SCREAMING_SNAKE_CASE_: Union[str, Any] =type_vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =initializer_range
SCREAMING_SNAKE_CASE_: Any =layer_norm_eps
SCREAMING_SNAKE_CASE_: List[str] =classifier_dropout
SCREAMING_SNAKE_CASE_: str =use_cache
| 36
|
"""simple docstring"""
def __magic_name__ ( lowercase ):
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
SCREAMING_SNAKE_CASE_: Tuple =[0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
SCREAMING_SNAKE_CASE_: Any =1
if upper_limit > 0:
SCREAMING_SNAKE_CASE_: List[str] =1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowercase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
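# Worked example of the recurrence above: catalan_numbers(5) should return [1, 1, 2, 5, 14, 42],
# e.g. C(4) = C0*C3 + C1*C2 + C2*C1 + C3*C0 = 5 + 2 + 2 + 5 = 14.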
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
_UpperCAmelCase = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 36
| 1
|
"""simple docstring"""
from PIL import Image
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =image.size
SCREAMING_SNAKE_CASE_: Union[str, Any] =0
SCREAMING_SNAKE_CASE_: Optional[int] =image.load()
for i in range(lowercase ):
for j in range(lowercase ):
SCREAMING_SNAKE_CASE_: List[str] =pixels[j, i]
mean += pixel
mean //= width * height
for j in range(lowercase ):
for i in range(lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =255 if pixels[i, j] > mean else 0
return image
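# Intent of the routine above (as in the usual mean-threshold recipe): compute the global mean of a
# greyscale ("L" mode) image, then binarise every pixel against it (255 above the mean, 0 otherwise).
# Note that the thresholded value computed above would need to be written back into pixels[i, j] for the
# returned image to actually change.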
if __name__ == "__main__":
_UpperCAmelCase = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
image.save("""output_image_path""")
| 36
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_UpperCAmelCase = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'albert'
def __init__( self : Dict , lowerCAmelCase : List[str]=3_0000 , lowerCAmelCase : List[Any]=128 , lowerCAmelCase : List[str]=4096 , lowerCAmelCase : str=12 , lowerCAmelCase : str=1 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Dict=1_6384 , lowerCAmelCase : int=1 , lowerCAmelCase : str="gelu_new" , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : List[Any]="absolute" , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[int]=3 , **lowerCAmelCase : int , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =embedding_size
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_hidden_groups
SCREAMING_SNAKE_CASE_: List[Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: List[Any] =inner_group_num
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_act
SCREAMING_SNAKE_CASE_: int =intermediate_size
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int =max_position_embeddings
SCREAMING_SNAKE_CASE_: Any =type_vocab_size
SCREAMING_SNAKE_CASE_: int =initializer_range
SCREAMING_SNAKE_CASE_: List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE_: Dict =classifier_dropout_prob
SCREAMING_SNAKE_CASE_: int =position_embedding_type
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 36
| 1
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
_UpperCAmelCase = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =Github(os.environ["""GITHUB_TOKEN"""] )
SCREAMING_SNAKE_CASE_: List[str] =g.get_repo("""huggingface/accelerate""" )
SCREAMING_SNAKE_CASE_: Tuple =repo.get_issues(state="""open""" )
for issue in open_issues:
        SCREAMING_SNAKE_CASE_: int =sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
SCREAMING_SNAKE_CASE_: int =comments[0] if len(lowercase ) > 0 else None
SCREAMING_SNAKE_CASE_: Optional[int] =dt.utcnow()
SCREAMING_SNAKE_CASE_: Optional[Any] =(current_time - issue.updated_at).days
SCREAMING_SNAKE_CASE_: Tuple =(current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="""closed""" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 36
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a ( yaml.SafeLoader ):
def lowerCamelCase__ ( self : int , lowerCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =[self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_: Any =[tuple(lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else key for key in keys]
SCREAMING_SNAKE_CASE_: Dict =Counter(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =[key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=False ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =super().construct_mapping(lowerCAmelCase , deep=lowerCAmelCase )
self._check_no_duplicates_on_constructed_node(lowerCAmelCase )
return mapping
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE_: Union[str, Any] =full_content[1:].index("""---""" ) + 1
SCREAMING_SNAKE_CASE_: List[str] ="""\n""".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowercase )
class a ( UpperCAmelCase__ ):
# class attributes
UpperCamelCase : Tuple = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def lowerCamelCase__ ( cls : List[Any] , lowerCAmelCase : Path ) -> "DatasetMetadata":
'''simple docstring'''
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =_split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(lowerCAmelCase )
else:
return cls()
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Path ) -> List[str]:
'''simple docstring'''
if path.exists():
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_: str =readme_file.read()
else:
SCREAMING_SNAKE_CASE_: str =None
SCREAMING_SNAKE_CASE_: Tuple =self._to_readme(lowerCAmelCase )
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as readme_file:
readme_file.write(lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Optional[str] = None ) -> str:
'''simple docstring'''
if readme_content is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =_split_yaml_from_readme(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] ="""---\n""" + self.to_yaml_string() + """---\n""" + content
else:
SCREAMING_SNAKE_CASE_: List[Any] ="""---\n""" + self.to_yaml_string() + """---\n"""
return full_content
@classmethod
def lowerCamelCase__ ( cls : Optional[int] , lowerCAmelCase : str ) -> "DatasetMetadata":
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =yaml.load(lowerCAmelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_: List[Any] ={
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**lowerCAmelCase )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=lowerCAmelCase , allow_unicode=lowerCAmelCase , encoding="""utf-8""" , ).decode("""utf-8""" )
_UpperCAmelCase = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_UpperCAmelCase = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
_UpperCAmelCase = ap.parse_args()
_UpperCAmelCase = Path(args.readme_filepath)
_UpperCAmelCase = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
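
# A minimal, readable sketch of the duplicate-key-rejecting YAML loader used above.
# The identifiers in this file are machine-renamed, so the names below are
# illustrative rather than the original ones.
from collections import Counter

import yaml


class NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


# yaml.load("a: 1\na: 2", Loader=NoDuplicateSafeLoader)  # would raise TypeError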
| 36
| 1
|
"""simple docstring"""
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase__ )
class a ( UpperCAmelCase__ ):
def __init__( self : Dict , **lowerCAmelCase : int ) -> int:
'''simple docstring'''
super().__init__(**lowerCAmelCase )
requires_backends(self , """vision""" )
requires_backends(self , """torch""" )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
self.check_model_type(lowerCAmelCase )
def lowerCamelCase__ ( self : Any , **lowerCAmelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] ={}
SCREAMING_SNAKE_CASE_: Union[str, Any] ={}
SCREAMING_SNAKE_CASE_: Union[str, Any] ={}
# preprocess args
if "points_per_batch" in kwargs:
SCREAMING_SNAKE_CASE_: Tuple =kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
SCREAMING_SNAKE_CASE_: Optional[Any] =kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
SCREAMING_SNAKE_CASE_: Union[str, Any] =kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
SCREAMING_SNAKE_CASE_: Optional[Any] =kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
SCREAMING_SNAKE_CASE_: Any =kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
SCREAMING_SNAKE_CASE_: List[Any] =kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
SCREAMING_SNAKE_CASE_: Tuple =kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
SCREAMING_SNAKE_CASE_: List[str] =kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
SCREAMING_SNAKE_CASE_: Optional[Any] =kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
SCREAMING_SNAKE_CASE_: List[str] =kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
SCREAMING_SNAKE_CASE_: Dict =kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
SCREAMING_SNAKE_CASE_: Union[str, Any] =kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : List[Any] , lowerCAmelCase : Union[str, Any] , *lowerCAmelCase : int , lowerCAmelCase : Any=None , lowerCAmelCase : List[str]=None , **lowerCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
return super().__call__(lowerCAmelCase , *lowerCAmelCase , num_workers=lowerCAmelCase , batch_size=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : int = 0 , lowerCAmelCase : float = 512 / 1500 , lowerCAmelCase : Optional[int] = 32 , lowerCAmelCase : Optional[int] = 1 , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =load_image(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =self.image_processor.size["""longest_edge"""]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.image_processor.generate_crop_boxes(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =self.image_processor(images=lowerCAmelCase , return_tensors="""pt""" )
with self.device_placement():
if self.framework == "pt":
SCREAMING_SNAKE_CASE_: List[Any] =self.get_inference_context()
with inference_context():
SCREAMING_SNAKE_CASE_: Union[str, Any] =self._ensure_tensor_on_device(lowerCAmelCase , device=self.device )
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) )
SCREAMING_SNAKE_CASE_: List[Any] =image_embeddings
SCREAMING_SNAKE_CASE_: Any =grid_points.shape[1]
SCREAMING_SNAKE_CASE_: List[str] =points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"""Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """
"""To return all points at once, set points_per_batch to None""" )
for i in range(0 , lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =grid_points[:, i : i + points_per_batch, :, :]
SCREAMING_SNAKE_CASE_: Dict =input_labels[:, i : i + points_per_batch]
SCREAMING_SNAKE_CASE_: Dict =i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def lowerCamelCase__ ( self : int , lowerCAmelCase : int , lowerCAmelCase : List[str]=0.8_8 , lowerCAmelCase : List[str]=0.9_5 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Dict=1 , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =model_inputs.pop("""input_boxes""" )
SCREAMING_SNAKE_CASE_: List[str] =model_inputs.pop("""is_last""" )
SCREAMING_SNAKE_CASE_: Tuple =model_inputs.pop("""original_sizes""" ).tolist()
SCREAMING_SNAKE_CASE_: Tuple =model_inputs.pop("""reshaped_input_sizes""" ).tolist()
SCREAMING_SNAKE_CASE_: List[str] =self.model(**lowerCAmelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
SCREAMING_SNAKE_CASE_: Tuple =model_outputs["""pred_masks"""]
SCREAMING_SNAKE_CASE_: str =self.image_processor.post_process_masks(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , binarize=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model_outputs["""iou_scores"""]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : str=0.7 , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =[]
SCREAMING_SNAKE_CASE_: List[Any] =[]
SCREAMING_SNAKE_CASE_: List[str] =[]
for model_output in model_outputs:
all_scores.append(model_output.pop("""iou_scores""" ) )
all_masks.extend(model_output.pop("""masks""" ) )
all_boxes.append(model_output.pop("""boxes""" ) )
SCREAMING_SNAKE_CASE_: List[str] =torch.cat(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =torch.cat(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self.image_processor.post_process_for_mask_generation(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =defaultdict(lowerCAmelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] ={}
if output_rle_mask:
SCREAMING_SNAKE_CASE_: Dict =rle_mask
if output_bboxes_mask:
SCREAMING_SNAKE_CASE_: Union[str, Any] =bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 36
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __magic_name__ ( lowercase ):
return (data["data"], data["target"])
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =XGBClassifier()
classifier.fit(lowercase , lowercase )
return classifier
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Optional[Any] =load_iris()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =data_handling(lowercase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =train_test_split(
lowercase , lowercase , test_size=0.25 )
SCREAMING_SNAKE_CASE_: Tuple =iris["""target_names"""]
# Create an XGBoost Classifier from the training data
SCREAMING_SNAKE_CASE_: Optional[int] =xgboost(lowercase , lowercase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
lowercase , lowercase , lowercase , display_labels=lowercase , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
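
# A compact, readable variant of the workflow above (illustrative only):
# load iris, split, fit an XGBoost classifier, and report test accuracy.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier

iris = load_iris()
x_train, x_test, y_train, y_test = train_test_split(
    iris["data"], iris["target"], test_size=0.25, random_state=0
)
clf = XGBClassifier().fit(x_train, y_train)
print(f"test accuracy: {clf.score(x_test, y_test):.3f}")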
| 36
| 1
|
"""simple docstring"""
from math import pi
def __magic_name__ ( lowercase , lowercase ):
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
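
# Self-contained check of the formula above: arc length = 2*pi*r * (angle/360).
# A 90-degree arc on a circle of radius 10 is a quarter of the circumference.
from math import pi


def arc_length(angle: float, radius: float) -> float:
    return 2 * pi * radius * (angle / 360)


print(arc_length(90, 10))  # 15.7079...
print(2 * pi * 10 / 4)     # same quarter-circumference, computed directly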
| 36
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
SCREAMING_SNAKE_CASE_: List[str] =[]
SCREAMING_SNAKE_CASE_: Any =[]
for rt in rc.restypes:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
SCREAMING_SNAKE_CASE_: Any ={name: i for i, name in enumerate(lowercase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor(
lowercase , dtype=torch.floataa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Any =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: Tuple =residx_atomaa_mask
SCREAMING_SNAKE_CASE_: Dict =residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
SCREAMING_SNAKE_CASE_: Dict =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Optional[int] =residx_atomaa_to_atomaa.long()
# create the corresponding mask
SCREAMING_SNAKE_CASE_: Optional[int] =torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
SCREAMING_SNAKE_CASE_: int =rc.restype_atoa[restype_letter]
SCREAMING_SNAKE_CASE_: Any =rc.residue_atoms[restype_name]
for atom_name in atom_names:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.atom_order[atom_name]
SCREAMING_SNAKE_CASE_: Dict =1
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: List[Any] =residx_atomaa_mask
return protein
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =tree_map(lambda lowercase : torch.tensor(lowercase , device=batch["""aatype"""].device ) , lowercase , np.ndarray )
SCREAMING_SNAKE_CASE_: int =tensor_tree_map(lambda lowercase : np.array(lowercase ) , make_atomaa_masks(lowercase ) )
return out
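
# Minimal sketch of the per-residue gather trick used above: indexing a
# (num_restypes, num_atoms) lookup table with a per-residue aatype vector
# yields a (num_res, num_atoms) map. Shapes and values below are toy assumptions.
import torch

restype_atom14_to_atom37 = torch.randint(0, 37, (21, 14))  # one row per residue type
aatype = torch.tensor([0, 7, 20, 3])                        # four residues
residx_atom14_to_atom37 = restype_atom14_to_atom37[aatype]  # shape (4, 14)
print(residx_atom14_to_atom37.shape)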
| 36
| 1
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str]=13 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=224 , lowerCAmelCase : List[str]=1000 , lowerCAmelCase : Optional[Any]=[3, 3, 6, 4] , lowerCAmelCase : int=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =parent
SCREAMING_SNAKE_CASE_: Any =batch_size
SCREAMING_SNAKE_CASE_: Tuple =num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] =is_training
SCREAMING_SNAKE_CASE_: Tuple =use_labels
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =num_labels
SCREAMING_SNAKE_CASE_: int =image_size
SCREAMING_SNAKE_CASE_: Optional[Any] =layer_depths
SCREAMING_SNAKE_CASE_: List[Any] =embed_dims
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_: Tuple =self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1E-5 , )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Any =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.num_labels
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE_: int =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
        ((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_: Tuple ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[int] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCamelCase : Tuple = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Any = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Dict = False
UpperCamelCase : List[str] = False
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Any =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Tuple =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[Any] =SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_: Optional[Any] =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[str] =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Dict =outputs.hidden_states
SCREAMING_SNAKE_CASE_: List[Any] =8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Any =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
def _config_zero_init(lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_: Dict =copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1E-10 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =_config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: List[Any] =_config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: int =prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
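
# Hedged standalone sketch of the slow integration test above: run the
# MBZUAI/swiftformer-xs checkpoint on one image and read off the predicted class.
# The image path mirrors the test fixture referenced earlier in this file.
import torch
from PIL import Image
from transformers import SwiftFormerForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print("predicted class id:", logits.argmax(-1).item())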
| 36
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =[]
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(lowercase , lowercase ):
inputs.append(create_inputs(lowercase ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =[]
for output in outputs:
if isinstance(lowercase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(lowercase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(lowercase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
@is_tool_test
class a :
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] =self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
SCREAMING_SNAKE_CASE_: Any =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: List[Any] =self.tool(*lowerCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
SCREAMING_SNAKE_CASE_: str =[outputs]
self.assertListEqual(output_types(lowerCAmelCase ) , self.tool.outputs )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Tuple =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase , self.tool.outputs ):
SCREAMING_SNAKE_CASE_: int =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for _input, input_type in zip(lowerCAmelCase , self.tool.inputs ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
SCREAMING_SNAKE_CASE_: Dict =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
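
# Hedged sketch of how the test mixin above (class `a`) is typically wired into
# a concrete tool test. The "translation" task name passed to load_tool is an
# assumption and may differ from the tools actually available.
import unittest

from transformers import load_tool

ToolTesterMixin = a  # readable alias for the machine-renamed mixin defined above


class TranslationToolTester(ToolTesterMixin, unittest.TestCase):
    def setUp(self):
        self.tool = load_tool("translation")
        self.tool.setup()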
| 36
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_UpperCAmelCase = logging.get_logger(__name__)
class a ( UpperCAmelCase__ ):
UpperCamelCase : Tuple = ['pixel_values']
def __init__( self : Any , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase : bool = True , lowerCAmelCase : Union[int, float] = 1 / 255 , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = True , **lowerCAmelCase : str , ) -> None:
'''simple docstring'''
super().__init__(**lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =size if size is not None else {"""shortest_edge""": 224}
SCREAMING_SNAKE_CASE_: List[str] =get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
SCREAMING_SNAKE_CASE_: Any =get_size_dict(lowerCAmelCase , param_name="""crop_size""" )
SCREAMING_SNAKE_CASE_: Any =do_resize
SCREAMING_SNAKE_CASE_: List[Any] =size
SCREAMING_SNAKE_CASE_: str =resample
SCREAMING_SNAKE_CASE_: Tuple =do_rescale
SCREAMING_SNAKE_CASE_: Tuple =rescale_factor
SCREAMING_SNAKE_CASE_: Optional[Any] =do_center_crop
SCREAMING_SNAKE_CASE_: Any =crop_size
SCREAMING_SNAKE_CASE_: List[Any] =do_flip_channel_order
def lowerCamelCase__ ( self : int , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : PILImageResampling = PIL.Image.BILINEAR , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : str , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE_: Any =get_resize_output_image_size(lowerCAmelCase , size=size["""shortest_edge"""] , default_to_square=lowerCAmelCase )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Any , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : int , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[int, float] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Union[str, Any] , ) -> Optional[Any]:
'''simple docstring'''
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : str , lowerCAmelCase : np.ndarray , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
'''simple docstring'''
return flip_channel_order(lowerCAmelCase , data_format=lowerCAmelCase )
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : ImageInput , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = None , lowerCAmelCase : bool = None , lowerCAmelCase : float = None , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase : Dict , ) -> PIL.Image.Image:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_: List[str] =resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_: str =do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_: Optional[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_: Tuple =do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_: Union[str, Any] =(
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
SCREAMING_SNAKE_CASE_: List[str] =size if size is not None else self.size
SCREAMING_SNAKE_CASE_: Dict =get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_: Optional[Any] =get_size_dict(lowerCAmelCase , param_name="""crop_size""" )
SCREAMING_SNAKE_CASE_: Tuple =make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_: Dict =[to_numpy_array(lowerCAmelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_: Any =[self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_: Union[str, Any] =[self.center_crop(image=lowerCAmelCase , size=lowerCAmelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_: str =[self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
SCREAMING_SNAKE_CASE_: Optional[Any] =[self.flip_channel_order(image=lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_: int =[to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_: Optional[Any] ={"""pixel_values""": images}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
def lowerCamelCase__ ( self : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Tuple] = None ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(lowerCAmelCase ) != len(lowerCAmelCase ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] =target_sizes.numpy()
SCREAMING_SNAKE_CASE_: int =[]
for idx in range(len(lowerCAmelCase ) ):
SCREAMING_SNAKE_CASE_: Optional[int] =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_: Any =logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE_: Optional[int] =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
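
# Usage sketch, assuming the processor above corresponds to MobileViT's image
# processor in transformers. The checkpoint id and the local image path are
# illustrative assumptions.
from PIL import Image
from transformers import MobileViTForSemanticSegmentation, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")

image = Image.open("example.jpg")
inputs = processor(images=image, return_tensors="pt")
outputs = model(**inputs)

# Resize logits back to the original (height, width) and take the per-pixel argmax.
segmentation = processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]
print(segmentation.shape)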
| 36
|
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =sorted(numsa + numsa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =divmod(len(lowercase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of first array: """).split()]
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of second array: """).split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 36
| 1
|
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
_UpperCAmelCase = True
except ImportError:
_UpperCAmelCase = False
try:
from torch.hub import _get_torch_home
_UpperCAmelCase = _get_torch_home()
except ImportError:
_UpperCAmelCase = os.path.expanduser(
os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
)
_UpperCAmelCase = os.path.join(torch_cache_home, """transformers""")
_UpperCAmelCase = """https://cdn.huggingface.co"""
_UpperCAmelCase = """https://s3.amazonaws.com/models.huggingface.co/bert"""
_UpperCAmelCase = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
_UpperCAmelCase = os.path.join(PATH, """config.yaml""")
_UpperCAmelCase = os.path.join(PATH, """attributes.txt""")
_UpperCAmelCase = os.path.join(PATH, """objects.txt""")
_UpperCAmelCase = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
_UpperCAmelCase = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
_UpperCAmelCase = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
_UpperCAmelCase = """pytorch_model.bin"""
_UpperCAmelCase = """config.yaml"""
def __magic_name__ ( lowercase=OBJECTS , lowercase=ATTRIBUTES ):
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
with open(lowercase ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
SCREAMING_SNAKE_CASE_: Optional[int] =[]
with open(lowercase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =OrderedDict()
with open(lowercase , """rb""" ) as f:
SCREAMING_SNAKE_CASE_: Any =pkl.load(lowercase )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
SCREAMING_SNAKE_CASE_: Optional[int] =ckp.pop(lowercase )
if isinstance(lowercase , np.ndarray ):
SCREAMING_SNAKE_CASE_: List[Any] =torch.tensor(lowercase )
else:
            assert isinstance(lowercase , torch.Tensor ), type(lowercase )
SCREAMING_SNAKE_CASE_: str =v
return r
class a :
UpperCamelCase : Dict = {}
def __init__( self : Tuple , lowerCAmelCase : dict , lowerCAmelCase : str = "root" , lowerCAmelCase : Tuple=0 ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =name
SCREAMING_SNAKE_CASE_: Optional[Any] =level
SCREAMING_SNAKE_CASE_: Dict ={}
for k, v in dictionary.items():
if v is None:
raise ValueError()
SCREAMING_SNAKE_CASE_: List[Any] =copy.deepcopy(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =copy.deepcopy(lowerCAmelCase )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[int] =Config(lowerCAmelCase , name=lowerCAmelCase , level=level + 1 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =v
setattr(self , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =d
def __repr__( self : Any ) -> int:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =val
SCREAMING_SNAKE_CASE_: str =val
SCREAMING_SNAKE_CASE_: Optional[int] =key.split(""".""" )
SCREAMING_SNAKE_CASE_: List[str] =len(lowerCAmelCase ) - 1
SCREAMING_SNAKE_CASE_: Tuple =self._pointer
if len(lowerCAmelCase ) > 1:
for i, l in enumerate(lowerCAmelCase ):
if hasattr(self , lowerCAmelCase ) and isinstance(getattr(self , lowerCAmelCase ) , lowerCAmelCase ):
setattr(getattr(self , lowerCAmelCase ) , """.""".join(levels[i:] ) , lowerCAmelCase )
if l == last_level:
SCREAMING_SNAKE_CASE_: Optional[Any] =val
else:
SCREAMING_SNAKE_CASE_: Tuple =pointer[l]
def lowerCamelCase__ ( self : Tuple ) -> Any:
'''simple docstring'''
return self._pointer
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ) -> List[Any]:
'''simple docstring'''
with open(f'''{file_name}''' , """w""" ) as stream:
dump(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
with open(f'''{file_name}''' , """w""" ) as stream:
json.dump(lowerCAmelCase , lowerCAmelCase )
@staticmethod
def lowerCamelCase__ ( lowerCAmelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
with open(lowerCAmelCase ) as stream:
SCREAMING_SNAKE_CASE_: Any =load(lowerCAmelCase , Loader=lowerCAmelCase )
return data
def __str__( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =""" """
if self._name != "root":
SCREAMING_SNAKE_CASE_: Dict =f'''{t * (self._level-1)}{self._name}:\n'''
else:
SCREAMING_SNAKE_CASE_: Dict =""""""
SCREAMING_SNAKE_CASE_: List[Any] =self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
r += f'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += f'''{t * (self._level)}{k}: {v} ({type(lowerCAmelCase ).__name__})\n'''
SCREAMING_SNAKE_CASE_: Any =level
return r[:-1]
@classmethod
def lowerCamelCase__ ( cls : List[Any] , lowerCAmelCase : str , **lowerCAmelCase : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =cls.get_config_dict(lowerCAmelCase , **lowerCAmelCase )
return cls(lowerCAmelCase )
@classmethod
def lowerCamelCase__ ( cls : Tuple , lowerCAmelCase : str , **lowerCAmelCase : Optional[int] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =kwargs.pop("""cache_dir""" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =kwargs.pop("""force_download""" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =kwargs.pop("""resume_download""" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =kwargs.pop("""proxies""" , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =kwargs.pop("""local_files_only""" , lowerCAmelCase )
if os.path.isdir(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =os.path.join(lowerCAmelCase , lowerCAmelCase )
elif os.path.isfile(lowerCAmelCase ) or is_remote_url(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =pretrained_model_name_or_path
else:
SCREAMING_SNAKE_CASE_: List[str] =hf_bucket_url(lowerCAmelCase , filename=lowerCAmelCase , use_cdn=lowerCAmelCase )
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE_: str =cached_path(
lowerCAmelCase , cache_dir=lowerCAmelCase , force_download=lowerCAmelCase , proxies=lowerCAmelCase , resume_download=lowerCAmelCase , local_files_only=lowerCAmelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
SCREAMING_SNAKE_CASE_: Tuple =Config.load_yaml(lowerCAmelCase )
except EnvironmentError:
SCREAMING_SNAKE_CASE_: Any ="""Can't load config for"""
raise EnvironmentError(lowerCAmelCase )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(lowerCAmelCase ), kwargs
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =torch.load("""dump.pt""" , map_location=in_tensor.device )
SCREAMING_SNAKE_CASE_: Optional[int] =in_tensor.numpy()
SCREAMING_SNAKE_CASE_: List[Any] =out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(lowercase , lowercase , rtol=0.01 , atol=0.1 ), (
f'''{sum([1 for x in np.isclose(lowercase , lowercase , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception("""tensors are all good""" )
# Hugging face functions below
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[str] =urlparse(lowercase )
return parsed.scheme in ("http", "https")
def __magic_name__ ( lowercase , lowercase , lowercase=True ):
SCREAMING_SNAKE_CASE_: List[Any] =CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
SCREAMING_SNAKE_CASE_: Optional[Any] ="""/""" not in model_id
if legacy_format:
return f'''{endpoint}/{model_id}-{filename}'''
else:
return f'''{endpoint}/{model_id}/{filename}'''
def __magic_name__ ( lowercase , lowercase , lowercase=None , lowercase=0 , lowercase=None , ):
SCREAMING_SNAKE_CASE_: Tuple ="""python/{}""".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowercase , lowercase ):
ua += "; " + "; ".join("""{}/{}""".format(lowercase , lowercase ) for k, v in user_agent.items() )
elif isinstance(lowercase , lowercase ):
ua += "; " + user_agent
SCREAMING_SNAKE_CASE_: Any ={"""user-agent""": ua}
if resume_size > 0:
SCREAMING_SNAKE_CASE_: List[str] ="""bytes=%d-""" % (resume_size,)
SCREAMING_SNAKE_CASE_: int =requests.get(lowercase , stream=lowercase , proxies=lowercase , headers=lowercase )
if response.status_code == 416: # Range not satisfiable
return
SCREAMING_SNAKE_CASE_: Optional[Any] =response.headers.get("""Content-Length""" )
SCREAMING_SNAKE_CASE_: int =resume_size + int(lowercase ) if content_length is not None else None
SCREAMING_SNAKE_CASE_: List[Any] =tqdm(
unit="""B""" , unit_scale=lowercase , total=lowercase , initial=lowercase , desc="""Downloading""" , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowercase ) )
temp_file.write(lowercase )
progress.close()
def __magic_name__ ( lowercase , lowercase=None , lowercase=False , lowercase=None , lowercase=10 , lowercase=False , lowercase=None , lowercase=False , ):
if cache_dir is None:
SCREAMING_SNAKE_CASE_: Tuple =TRANSFORMERS_CACHE
if isinstance(lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =str(lowercase )
os.makedirs(lowercase , exist_ok=lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =None
if not local_files_only:
try:
SCREAMING_SNAKE_CASE_: int =requests.head(lowercase , allow_redirects=lowercase , proxies=lowercase , timeout=lowercase )
if response.status_code == 200:
SCREAMING_SNAKE_CASE_: Any =response.headers.get("""ETag""" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
SCREAMING_SNAKE_CASE_: int =url_to_filename(lowercase , lowercase )
# get cache path to put the file
SCREAMING_SNAKE_CASE_: Union[str, Any] =os.path.join(lowercase , lowercase )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowercase ):
return cache_path
else:
SCREAMING_SNAKE_CASE_: Tuple =[
file
for file in fnmatch.filter(os.listdir(lowercase ) , filename + """.*""" )
if not file.endswith(""".json""" ) and not file.endswith(""".lock""" )
]
if len(lowercase ) > 0:
return os.path.join(lowercase , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""" )
return None
# From now on, etag is not None.
if os.path.exists(lowercase ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
SCREAMING_SNAKE_CASE_: Tuple =cache_path + """.lock"""
with FileLock(lowercase ):
# If the download just completed while the lock was activated.
if os.path.exists(lowercase ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
SCREAMING_SNAKE_CASE_: Optional[Any] =cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(lowercase , """a+b""" ) as f:
yield f
SCREAMING_SNAKE_CASE_: Tuple =_resumable_file_manager
if os.path.exists(lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =os.stat(lowercase ).st_size
else:
SCREAMING_SNAKE_CASE_: Dict =0
else:
SCREAMING_SNAKE_CASE_: int =partial(tempfile.NamedTemporaryFile , dir=lowercase , delete=lowercase )
SCREAMING_SNAKE_CASE_: Dict =0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" , lowercase , temp_file.name , )
http_get(
lowercase , lowercase , proxies=lowercase , resume_size=lowercase , user_agent=lowercase , )
os.replace(temp_file.name , lowercase )
SCREAMING_SNAKE_CASE_: str ={"""url""": url, """etag""": etag}
SCREAMING_SNAKE_CASE_: Dict =cache_path + """.json"""
with open(lowercase , """w""" ) as meta_file:
json.dump(lowercase , lowercase )
return cache_path
def __magic_name__ ( lowercase , lowercase=None ):
SCREAMING_SNAKE_CASE_: Any =url.encode("""utf-8""" )
SCREAMING_SNAKE_CASE_: List[Any] =shaaaa(lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =url_hash.hexdigest()
if etag:
SCREAMING_SNAKE_CASE_: str =etag.encode("""utf-8""" )
SCREAMING_SNAKE_CASE_: Dict =shaaaa(lowercase )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
def __magic_name__ ( lowercase , lowercase=None , lowercase=False , lowercase=None , lowercase=False , lowercase=None , lowercase=False , lowercase=False , lowercase=False , ):
if cache_dir is None:
SCREAMING_SNAKE_CASE_: List[str] =TRANSFORMERS_CACHE
if isinstance(lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =str(lowercase )
if isinstance(lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str =str(lowercase )
if is_remote_url(lowercase ):
# URL, so get it from the cache (downloading if necessary)
SCREAMING_SNAKE_CASE_: Optional[int] =get_from_cache(
lowercase , cache_dir=lowercase , force_download=lowercase , proxies=lowercase , resume_download=lowercase , user_agent=lowercase , local_files_only=lowercase , )
elif os.path.exists(lowercase ):
# File, and it exists.
SCREAMING_SNAKE_CASE_: Any =url_or_filename
elif urlparse(lowercase ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(lowercase ) )
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(lowercase ) )
if extract_compressed_file:
if not is_zipfile(lowercase ) and not tarfile.is_tarfile(lowercase ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =os.path.split(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =output_file.replace(""".""" , """-""" ) + """-extracted"""
SCREAMING_SNAKE_CASE_: List[str] =os.path.join(lowercase , lowercase )
if os.path.isdir(lowercase ) and os.listdir(lowercase ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
SCREAMING_SNAKE_CASE_: Tuple =output_path + """.lock"""
with FileLock(lowercase ):
shutil.rmtree(lowercase , ignore_errors=lowercase )
os.makedirs(lowercase )
if is_zipfile(lowercase ):
with ZipFile(lowercase , """r""" ) as zip_file:
zip_file.extractall(lowercase )
zip_file.close()
elif tarfile.is_tarfile(lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =tarfile.open(lowercase )
tar_file.extractall(lowercase )
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(lowercase ) )
return output_path_extracted
return output_path
def __magic_name__ ( lowercase , lowercase="," ):
assert isinstance(lowercase , lowercase )
if os.path.isfile(lowercase ):
with open(lowercase ) as f:
SCREAMING_SNAKE_CASE_: Dict =eval(f.read() )
else:
SCREAMING_SNAKE_CASE_: Dict =requests.get(lowercase )
try:
        SCREAMING_SNAKE_CASE_: Optional[Any] =req.json()
except Exception:
SCREAMING_SNAKE_CASE_: str =req.content.decode()
assert data is not None, "could not connect"
try:
SCREAMING_SNAKE_CASE_: Optional[Any] =eval(lowercase )
except Exception:
SCREAMING_SNAKE_CASE_: Optional[int] =data.split("""\n""" )
req.close()
return data
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =requests.get(lowercase )
SCREAMING_SNAKE_CASE_: Any =np.array(Image.open(BytesIO(response.content ) ) )
return img
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =url.split("""/""" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowercase )
with open(lowercase , """rb""" ) as stream:
SCREAMING_SNAKE_CASE_: Dict =pkl.load(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =weights.pop("""model""" )
SCREAMING_SNAKE_CASE_: Union[str, Any] ={}
for k, v in model.items():
SCREAMING_SNAKE_CASE_: List[str] =torch.from_numpy(lowercase )
if "running_var" in k:
SCREAMING_SNAKE_CASE_: List[Any] =torch.tensor([0] )
SCREAMING_SNAKE_CASE_: Optional[int] =k.replace("""running_var""" , """num_batches_tracked""" )
SCREAMING_SNAKE_CASE_: Any =zero
return new
def __magic_name__ ( ):
print(f'''{os.path.abspath(os.path.join(lowercase , os.pardir ) )}/demo.ipynb''' )
def __magic_name__ ( lowercase , lowercase="RGB" ):
assert isinstance(lowercase , lowercase )
if os.path.isfile(lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =cva.imread(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =get_image_from_url(lowercase )
assert img is not None, f'''could not connect to: {im}'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =cva.cvtColor(lowercase , cva.COLOR_BGR2RGB )
if input_format == "RGB":
SCREAMING_SNAKE_CASE_: Dict =img[:, :, ::-1]
return img
def __magic_name__ ( lowercase , lowercase=1 ):
return (images[i : i + batch] for i in range(0 , len(lowercase ) , lowercase ))
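
# Minimal sketch of the cache-key scheme used above: the URL (and the ETag,
# when one is known) is hashed with SHA-256 to build a stable on-disk filename.
from hashlib import sha256
from typing import Optional


def cache_filename(url: str, etag: Optional[str] = None) -> str:
    name = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        name += "." + sha256(etag.encode("utf-8")).hexdigest()
    if url.endswith(".h5"):
        name += ".h5"
    return name


print(cache_filename("https://example.com/model.bin", etag="abc123"))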
| 36
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str]=13 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=224 , lowerCAmelCase : List[str]=1000 , lowerCAmelCase : Optional[Any]=[3, 3, 6, 4] , lowerCAmelCase : int=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =parent
SCREAMING_SNAKE_CASE_: Any =batch_size
SCREAMING_SNAKE_CASE_: Tuple =num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] =is_training
SCREAMING_SNAKE_CASE_: Tuple =use_labels
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =num_labels
SCREAMING_SNAKE_CASE_: int =image_size
SCREAMING_SNAKE_CASE_: Optional[Any] =layer_depths
SCREAMING_SNAKE_CASE_: List[Any] =embed_dims
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_: Tuple =self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1E-5 , )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Any =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.num_labels
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE_: int =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): str =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_: Tuple ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[int] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCamelCase : Tuple = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Any = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Dict = False
UpperCamelCase : List[str] = False
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Any =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Tuple =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[Any] =SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_: Optional[Any] =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[str] =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Dict =outputs.hidden_states
SCREAMING_SNAKE_CASE_: List[Any] =8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Any =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
def _config_zero_init(lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_: Dict =copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1E-10 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =_config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: List[Any] =_config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: int =prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 36
| 1
|
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =set(lowercase ), [start]
while stack:
SCREAMING_SNAKE_CASE_: int =stack.pop()
explored.add(lowercase )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(lowercase )
return explored
_UpperCAmelCase = {
"""A""": ["""B""", """C""", """D"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F"""],
"""D""": ["""B""", """D"""],
"""E""": ["""B""", """F"""],
"""F""": ["""C""", """E""", """G"""],
"""G""": ["""F"""],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, """A"""))
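# Worked note on the iterative DFS above: because the sample graph is connected,
# a search from "A" reaches every vertex. A tiny restated sketch (illustrative
# names, same pop-last / push-unvisited-neighbours logic) makes that easy to check:
def dfs(graph, start):
    explored, stack = {start}, [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored

tiny = {"A": ["B", "C"], "B": ["A"], "C": ["A"]}
assert dfs(tiny, "A") == {"A", "B", "C"}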
| 36
|
"""simple docstring"""
from math import pi
def __magic_name__ ( lowercase , lowercase ):
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
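# Worked arithmetic for the call above (independent of the snippet itself): a
# 90-degree arc of a circle with radius 10 covers a quarter of the circumference,
# 2 * pi * 10 * (90 / 360) = 5 * pi, roughly 15.71.
from math import pi
print(2 * pi * 10 * (90 / 360))  # 15.707963267948966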
| 36
| 1
|
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =2
SCREAMING_SNAKE_CASE_: Optional[int] =[]
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(lowercase )
if n > 1:
factors.append(lowercase )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
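# Worked example of the trial-division factorisation above, restated with
# illustrative names (each prime factor is divided out repeatedly before moving on):
def factorise(n):
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors

assert factorise(360) == [2, 2, 2, 3, 3, 5]  # 360 = 2**3 * 3**2 * 5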
| 36
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Any =jax.device_count()
SCREAMING_SNAKE_CASE_: Dict =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Dict =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int ="""stabilityai/stable-diffusion-2"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Optional[int] =scheduler_params
SCREAMING_SNAKE_CASE_: Tuple ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.device_count()
SCREAMING_SNAKE_CASE_: Optional[Any] =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Any =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: str =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Any =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 36
| 1
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class a ( UpperCAmelCase__ ):
UpperCamelCase : torch.FloatTensor
UpperCamelCase : torch.FloatTensor
class a ( UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCamelCase : Optional[int] = 1
@register_to_config
def __init__( self : Tuple , lowerCAmelCase : int = 2000 , lowerCAmelCase : float = 0.1_5 , lowerCAmelCase : float = 0.0_1 , lowerCAmelCase : float = 1_3_4_8.0 , lowerCAmelCase : float = 1E-5 , lowerCAmelCase : int = 1 , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =sigma_max
# setable values
SCREAMING_SNAKE_CASE_: Any =None
self.set_sigmas(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : Optional[int] = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : int , lowerCAmelCase : float = None , lowerCAmelCase : Union[str, torch.device] = None ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =sampling_eps if sampling_eps is not None else self.config.sampling_eps
SCREAMING_SNAKE_CASE_: int =torch.linspace(1 , lowerCAmelCase , lowerCAmelCase , device=lowerCAmelCase )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : int , lowerCAmelCase : float = None , lowerCAmelCase : float = None , lowerCAmelCase : float = None ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =sigma_min if sigma_min is not None else self.config.sigma_min
SCREAMING_SNAKE_CASE_: Any =sigma_max if sigma_max is not None else self.config.sigma_max
SCREAMING_SNAKE_CASE_: Any =sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
SCREAMING_SNAKE_CASE_: Any =torch.exp(torch.linspace(math.log(lowerCAmelCase ) , math.log(lowerCAmelCase ) , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : str ) -> Tuple:
'''simple docstring'''
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : int , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : bool = True , ) -> Union[SdeVeOutput, Tuple]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
SCREAMING_SNAKE_CASE_: List[Any] =timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
SCREAMING_SNAKE_CASE_: Union[str, Any] =(timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
SCREAMING_SNAKE_CASE_: Optional[int] =timesteps.to(self.discrete_sigmas.device )
SCREAMING_SNAKE_CASE_: Optional[int] =self.discrete_sigmas[timesteps].to(sample.device )
SCREAMING_SNAKE_CASE_: Any =self.get_adjacent_sigma(lowerCAmelCase , lowerCAmelCase ).to(sample.device )
SCREAMING_SNAKE_CASE_: str =torch.zeros_like(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =(sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
SCREAMING_SNAKE_CASE_: List[str] =diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
SCREAMING_SNAKE_CASE_: Optional[Any] =diffusion.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_: Tuple =drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
SCREAMING_SNAKE_CASE_: List[Any] =randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCAmelCase , device=sample.device , dtype=sample.dtype )
SCREAMING_SNAKE_CASE_: str =sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
SCREAMING_SNAKE_CASE_: Dict =prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase , prev_sample_mean=lowerCAmelCase )
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : bool = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
SCREAMING_SNAKE_CASE_: int =randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
SCREAMING_SNAKE_CASE_: str =torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
SCREAMING_SNAKE_CASE_: Optional[int] =torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
SCREAMING_SNAKE_CASE_: Optional[int] =(self.config.snr * noise_norm / grad_norm) ** 2 * 2
SCREAMING_SNAKE_CASE_: Optional[int] =step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
SCREAMING_SNAKE_CASE_: List[Any] =step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
SCREAMING_SNAKE_CASE_: Optional[Any] =step_size.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_: List[str] =sample + step_size * model_output
SCREAMING_SNAKE_CASE_: Any =prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase )
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : torch.FloatTensor , ) -> torch.FloatTensor:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_: Tuple =self.discrete_sigmas.to(original_samples.device )[timesteps]
SCREAMING_SNAKE_CASE_: str =(
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
)
SCREAMING_SNAKE_CASE_: Any =noise + original_samples
return noisy_samples
def __len__( self : List[Any] ) -> Dict:
'''simple docstring'''
return self.config.num_train_timesteps
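# Small numeric sketch of the sigma schedule set up above (it only restates the
# geometric interpolation visible in the sigma-setting method, with made-up values):
# log(sigma) moves linearly between log(sigma_min) and log(sigma_max).
import math
import torch

sigma_min, sigma_max, steps = 0.01, 1348.0, 5
timesteps = torch.linspace(1, 1e-5, steps).tolist()
sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in timesteps])
discrete = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), steps))
print(sigmas)    # decreasing from sigma_max (t = 1) down toward sigma_min (t -> 0)
print(discrete)  # increasing from sigma_min to sigma_max, evenly spaced in log space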
| 36
|
"""simple docstring"""
def __magic_name__ ( lowercase = 200_0000 ):
SCREAMING_SNAKE_CASE_: List[Any] =[0 for i in range(n + 1 )]
SCREAMING_SNAKE_CASE_: Union[str, Any] =1
SCREAMING_SNAKE_CASE_: Optional[Any] =1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =1
SCREAMING_SNAKE_CASE_: Dict =0
for i in range(lowercase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 1
|
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE_: List[str] =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
SCREAMING_SNAKE_CASE_: int =os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
SCREAMING_SNAKE_CASE_: List[Any] =[sys.executable] + distributed_args
execute_subprocess_async(lowerCAmelCase , env=os.environ.copy() )
| 36
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
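# Hedged usage note: assuming the script is saved locally (the filename below is
# illustrative), it is driven entirely by the argparse flags defined above, e.g.
#
#   python extract_for_distillation.py \
#       --model_type bert \
#       --model_name bert-base-uncased \
#       --dump_checkpoint serialization_dir/tf_bert-base-uncased_0247911.pth \
#       --vocab_transform
#
# It copies the embeddings, six teacher encoder layers (indices 0, 2, 4, 7, 9, 11)
# and the MLM head into a smaller state dict to initialise a distilled student.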
| 36
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class a ( UpperCAmelCase__ ):
UpperCamelCase : torch.FloatTensor
UpperCamelCase : torch.FloatTensor
UpperCamelCase : Optional[torch.FloatTensor] = None
class a ( UpperCAmelCase__ , UpperCAmelCase__ ):
UpperCamelCase : List[Any] = 2
@register_to_config
def __init__( self : List[Any] , lowerCAmelCase : float = 0.0_2 , lowerCAmelCase : float = 100 , lowerCAmelCase : float = 1.0_0_7 , lowerCAmelCase : float = 80 , lowerCAmelCase : float = 0.0_5 , lowerCAmelCase : float = 50 , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =sigma_max
# setable values
SCREAMING_SNAKE_CASE_: int =None
SCREAMING_SNAKE_CASE_: np.IntTensor =None
SCREAMING_SNAKE_CASE_: torch.FloatTensor =None # sigma(t_i)
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : Optional[int] = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Union[str, torch.device] = None ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =num_inference_steps
SCREAMING_SNAKE_CASE_: Dict =np.arange(0 , self.num_inference_steps )[::-1].copy()
SCREAMING_SNAKE_CASE_: str =torch.from_numpy(lowerCAmelCase ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =[
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(lowerCAmelCase , dtype=torch.floataa , device=lowerCAmelCase )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : float , lowerCAmelCase : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
SCREAMING_SNAKE_CASE_: str =min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =0
# sample eps ~ N(0, S_noise^2 * I)
SCREAMING_SNAKE_CASE_: List[str] =self.config.s_noise * randn_tensor(sample.shape , generator=lowerCAmelCase ).to(sample.device )
SCREAMING_SNAKE_CASE_: Tuple =sigma + gamma * sigma
SCREAMING_SNAKE_CASE_: List[str] =sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def lowerCamelCase__ ( self : Any , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =sample_hat + sigma_hat * model_output
SCREAMING_SNAKE_CASE_: Union[str, Any] =(sample_hat - pred_original_sample) / sigma_hat
SCREAMING_SNAKE_CASE_: Union[str, Any] =sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowerCAmelCase , derivative=lowerCAmelCase , pred_original_sample=lowerCAmelCase )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =sample_prev + sigma_prev * model_output
SCREAMING_SNAKE_CASE_: List[str] =(sample_prev - pred_original_sample) / sigma_prev
SCREAMING_SNAKE_CASE_: str =sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=lowerCAmelCase , derivative=lowerCAmelCase , pred_original_sample=lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple ) -> Any:
'''simple docstring'''
raise NotImplementedError()
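# Numeric sketch of the schedule built in the timestep-setup method above (it only
# restates the formula visible there, using the config defaults as example values):
# each entry is sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1)), a
# geometric interpolation between sigma_max**2 (i = 0) and sigma_min**2 (i = N - 1).
sigma_min, sigma_max, N = 0.02, 100.0, 5
values = [sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (N - 1)) for i in range(N)]
print(values)  # [10000.0, ..., 0.0004]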
| 36
|
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
return int((input_a, input_a).count(0 ) == 0 )
def __magic_name__ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 36
| 1
|
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( lowercase , lowercase ):
if b == 0:
return (1, 0)
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): List[str] =extended_euclid(lowercase , a % b )
SCREAMING_SNAKE_CASE_: Any =a // b
return (y, x - k * y)
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): Dict =extended_euclid(lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =na * na
SCREAMING_SNAKE_CASE_: int =ra * x * na + ra * y * na
return (n % m + m) % m
def __magic_name__ ( lowercase , lowercase ):
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): str =extended_euclid(lowercase , lowercase )
if b < 0:
SCREAMING_SNAKE_CASE_: Union[str, Any] =(b % n + n) % n
return b
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =invert_modulo(lowercase , lowercase ), invert_modulo(lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Any =na * na
SCREAMING_SNAKE_CASE_: Optional[Any] =ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="""chinese_remainder_theorem""", verbose=True)
testmod(name="""chinese_remainder_theorem2""", verbose=True)
testmod(name="""invert_modulo""", verbose=True)
testmod(name="""extended_euclid""", verbose=True)
| 36
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def __magic_name__ ( lowercase , lowercase , lowercase ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""input_conv.weight_g"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[str] =checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: Union[str, Any] =checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[Any] =checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""output_conv.1.weight_g"""]
SCREAMING_SNAKE_CASE_: List[str] =checkpoint["""output_conv.1.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE_: List[Any] =SpeechTaHifiGanConfig.from_pretrained(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] =SpeechTaHifiGan(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch.load(lowercase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =np.load(lowercase )
SCREAMING_SNAKE_CASE_: Any =stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_: str =stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
model.save_pretrained(lowercase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 36
| 1
|
"""simple docstring"""
import sys
_UpperCAmelCase = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =1
for digit in s:
product *= int(lowercase )
return product
def __magic_name__ ( lowercase = N ):
SCREAMING_SNAKE_CASE_: Dict =-sys.maxsize - 1
SCREAMING_SNAKE_CASE_: Dict =n[:13]
SCREAMING_SNAKE_CASE_: Dict =13
while cur_index < len(lowercase ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
SCREAMING_SNAKE_CASE_: int =substr[1:] + n[cur_index]
cur_index += 1
else:
SCREAMING_SNAKE_CASE_: Optional[int] =max(lowercase , str_eval(lowercase ) )
SCREAMING_SNAKE_CASE_: Optional[int] =n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __magic_name__ ( lowercase ):
if "cls_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def __magic_name__ ( lowercase , lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Optional[int] =orig_state_dict.pop(lowercase )
if "qkv" in key:
SCREAMING_SNAKE_CASE_: Dict =key.split(""".""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE_: int =config.decoder_hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] ="""decoder.decoder_layers."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Dict =val[:dim, :]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: str =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: List[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: List[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Any =config.hidden_size
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""vit.encoder.layer."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim, :]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: Dict =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Any =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Tuple =val
return orig_state_dict
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: List[Any] =1024
SCREAMING_SNAKE_CASE_: Dict =4096
SCREAMING_SNAKE_CASE_: Tuple =24
SCREAMING_SNAKE_CASE_: int =16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Union[str, Any] =14
SCREAMING_SNAKE_CASE_: Any =1280
SCREAMING_SNAKE_CASE_: Dict =5120
SCREAMING_SNAKE_CASE_: Optional[int] =32
SCREAMING_SNAKE_CASE_: Optional[Any] =16
SCREAMING_SNAKE_CASE_: Tuple =ViTMAEForPreTraining(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""model"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: str =convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple ="""https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
SCREAMING_SNAKE_CASE_: List[Any] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
SCREAMING_SNAKE_CASE_: int =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: int =image_processor(images=lowercase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Dict =torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
SCREAMING_SNAKE_CASE_: Any =torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 36
| 1
|
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_UpperCAmelCase = None
_UpperCAmelCase = """<""" if sys.byteorder == """little""" else """>"""
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_UpperCAmelCase = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class a :
UpperCamelCase : bool = True
UpperCamelCase : Optional[str] = None
# Automatically constructed
UpperCamelCase : ClassVar[str] = "PIL.Image.Image"
UpperCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
UpperCamelCase : str = field(default='Image' , init=UpperCAmelCase__ , repr=UpperCAmelCase__ )
def __call__( self : List[str] ) -> Tuple:
'''simple docstring'''
return self.pa_type
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =np.array(lowerCAmelCase )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
return {"path": value, "bytes": None}
elif isinstance(lowerCAmelCase , lowerCAmelCase ):
return {"path": None, "bytes": value}
elif isinstance(lowerCAmelCase , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(lowerCAmelCase )
elif isinstance(lowerCAmelCase , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(lowerCAmelCase )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : dict , lowerCAmelCase : Tuple=None ) -> "PIL.Image.Image":
'''simple docstring'''
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
SCREAMING_SNAKE_CASE_: int ={}
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =PIL.Image.open(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_: int =path.split("""::""" )[-1]
try:
SCREAMING_SNAKE_CASE_: Optional[int] =string_to_dict(lowerCAmelCase , config.HUB_DATASETS_URL )["""repo_id"""]
SCREAMING_SNAKE_CASE_: List[Any] =token_per_repo_id.get(lowerCAmelCase )
except ValueError:
SCREAMING_SNAKE_CASE_: List[Any] =None
with xopen(lowerCAmelCase , """rb""" , use_auth_token=lowerCAmelCase ) as f:
SCREAMING_SNAKE_CASE_: int =BytesIO(f.read() )
SCREAMING_SNAKE_CASE_: Union[str, Any] =PIL.Image.open(bytes_ )
else:
SCREAMING_SNAKE_CASE_: int =PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowerCamelCase__ ( self : Tuple ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ) -> pa.StructArray:
'''simple docstring'''
if pa.types.is_string(storage.type ):
SCREAMING_SNAKE_CASE_: Dict =pa.array([None] * len(lowerCAmelCase ) , type=pa.binary() )
SCREAMING_SNAKE_CASE_: int =pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
SCREAMING_SNAKE_CASE_: Tuple =pa.array([None] * len(lowerCAmelCase ) , type=pa.string() )
SCREAMING_SNAKE_CASE_: Optional[Any] =pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
SCREAMING_SNAKE_CASE_: Optional[int] =storage.field("""bytes""" )
else:
SCREAMING_SNAKE_CASE_: str =pa.array([None] * len(lowerCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
SCREAMING_SNAKE_CASE_: List[str] =storage.field("""path""" )
else:
SCREAMING_SNAKE_CASE_: Dict =pa.array([None] * len(lowerCAmelCase ) , type=pa.string() )
SCREAMING_SNAKE_CASE_: Optional[int] =pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
SCREAMING_SNAKE_CASE_: Any =pa.array(
[encode_np_array(np.array(lowerCAmelCase ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
SCREAMING_SNAKE_CASE_: int =pa.array([None] * len(lowerCAmelCase ) , type=pa.string() )
SCREAMING_SNAKE_CASE_: Tuple =pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowerCAmelCase , self.pa_type )
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : pa.StructArray ) -> pa.StructArray:
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(lowerCAmelCase : List[str] ):
with xopen(lowerCAmelCase , """rb""" ) as f:
SCREAMING_SNAKE_CASE_: List[Any] =f.read()
return bytes_
SCREAMING_SNAKE_CASE_: int =pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
SCREAMING_SNAKE_CASE_: Dict =pa.array(
[os.path.basename(lowerCAmelCase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
SCREAMING_SNAKE_CASE_: Optional[int] =pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowerCAmelCase , self.pa_type )
def __magic_name__ ( ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
SCREAMING_SNAKE_CASE_: Any =list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =BytesIO()
if image.format in list_image_compression_formats():
SCREAMING_SNAKE_CASE_: Optional[int] =image.format
else:
SCREAMING_SNAKE_CASE_: Any ="""PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(lowercase , format=lowercase )
return buffer.getvalue()
def __magic_name__ ( lowercase ):
if hasattr(lowercase , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(lowercase )}
def __magic_name__ ( lowercase ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
SCREAMING_SNAKE_CASE_: List[Any] =array.dtype
SCREAMING_SNAKE_CASE_: List[Any] =dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
SCREAMING_SNAKE_CASE_: int =dtype.kind
SCREAMING_SNAKE_CASE_: str =dtype.itemsize
SCREAMING_SNAKE_CASE_: Optional[Any] =None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
SCREAMING_SNAKE_CASE_: List[str] =np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
SCREAMING_SNAKE_CASE_: int =dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
SCREAMING_SNAKE_CASE_: Any =dtype_byteorder + dtype_kind + str(lowercase )
SCREAMING_SNAKE_CASE_: List[str] =np.dtype(lowercase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
SCREAMING_SNAKE_CASE_: int =PIL.Image.fromarray(array.astype(lowercase ) )
return {"path": None, "bytes": image_to_bytes(lowercase )}
def __magic_name__ ( lowercase ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =first_non_null_value(lowercase )
if isinstance(lowercase , lowercase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(lowercase , np.ndarray ):
SCREAMING_SNAKE_CASE_: List[str] =no_op_if_value_is_null(lowercase )
return [obj_to_image_dict_func(lowercase ) for obj in objs]
elif isinstance(lowercase , PIL.Image.Image ):
SCREAMING_SNAKE_CASE_: List[Any] =no_op_if_value_is_null(lowercase )
return [obj_to_image_dict_func(lowercase ) for obj in objs]
else:
return objs
else:
return objs
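# Hedged usage sketch, assuming this feature matches the public `datasets.Image`
# API and that Pillow is installed (values below are illustrative):
import numpy as np
from datasets import Image as ImageFeature

feature = ImageFeature()
encoded = feature.encode_example(np.zeros((4, 4, 3), dtype=np.uint8))
print(sorted(encoded))  # ['bytes', 'path'] - the array is round-tripped through PNG bytes
decoded = feature.decode_example(encoded)
print(decoded.size, decoded.mode)  # (4, 4) RGB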
| 36
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
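# --- Added illustrative sketch (not part of the original module) ---
# The block above builds an import structure and (in the else branch) wraps the module in
# a lazy loader so torch/TF are only imported when one of their classes is first accessed.
# Below is a generic, self-contained version of that idea via PEP 562 module-level
# __getattr__; it is NOT the actual _LazyModule implementation, and the attribute map is
# illustrative only.
import importlib

_LAZY_ATTRS = {"XLMModel": "modeling_xlm", "TFXLMModel": "modeling_tf_xlm"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module("." + _LAZY_ATTRS[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")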
| 36
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""xlm-mlm-en-2048""": """https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json""",
"""xlm-mlm-ende-1024""": """https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-enfr-1024""": """https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json""",
"""xlm-mlm-enro-1024""": """https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json""",
"""xlm-mlm-tlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json""",
"""xlm-mlm-xnli15-1024""": """https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json""",
"""xlm-clm-enfr-1024""": """https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json""",
"""xlm-clm-ende-1024""": """https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json""",
"""xlm-mlm-17-1280""": """https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json""",
"""xlm-mlm-100-1280""": """https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : int = 'xlm'
UpperCamelCase : Union[str, Any] = {
'hidden_size': 'emb_dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
'n_words': 'vocab_size', # For backward compatibility
}
def __init__( self : List[str] , lowerCAmelCase : Any=3_0145 , lowerCAmelCase : Union[str, Any]=2048 , lowerCAmelCase : Any=12 , lowerCAmelCase : List[Any]=16 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : str=True , lowerCAmelCase : str=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Any=1 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict=512 , lowerCAmelCase : Any=2048**-0.5 , lowerCAmelCase : str=1E-12 , lowerCAmelCase : Dict=0.0_2 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Dict=1 , lowerCAmelCase : Any=2 , lowerCAmelCase : Optional[int]=3 , lowerCAmelCase : Tuple=5 , lowerCAmelCase : Tuple=True , lowerCAmelCase : str="first" , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : str=True , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Union[str, Any]=5 , lowerCAmelCase : List[str]=5 , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : int=0 , lowerCAmelCase : List[str]=2 , lowerCAmelCase : Union[str, Any]=0 , **lowerCAmelCase : Optional[Any] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =vocab_size
SCREAMING_SNAKE_CASE_: Union[str, Any] =emb_dim
SCREAMING_SNAKE_CASE_: Any =n_layers
SCREAMING_SNAKE_CASE_: Dict =n_heads
SCREAMING_SNAKE_CASE_: Any =dropout
SCREAMING_SNAKE_CASE_: Optional[int] =attention_dropout
SCREAMING_SNAKE_CASE_: Dict =gelu_activation
SCREAMING_SNAKE_CASE_: Optional[Any] =sinusoidal_embeddings
SCREAMING_SNAKE_CASE_: Optional[Any] =causal
SCREAMING_SNAKE_CASE_: Optional[int] =asm
SCREAMING_SNAKE_CASE_: Union[str, Any] =n_langs
SCREAMING_SNAKE_CASE_: Optional[Any] =use_lang_emb
SCREAMING_SNAKE_CASE_: str =layer_norm_eps
SCREAMING_SNAKE_CASE_: Dict =bos_index
SCREAMING_SNAKE_CASE_: Any =eos_index
SCREAMING_SNAKE_CASE_: List[Any] =pad_index
SCREAMING_SNAKE_CASE_: Tuple =unk_index
SCREAMING_SNAKE_CASE_: Any =mask_index
SCREAMING_SNAKE_CASE_: int =is_encoder
SCREAMING_SNAKE_CASE_: List[str] =max_position_embeddings
SCREAMING_SNAKE_CASE_: List[Any] =embed_init_std
SCREAMING_SNAKE_CASE_: Dict =init_std
SCREAMING_SNAKE_CASE_: List[Any] =summary_type
SCREAMING_SNAKE_CASE_: int =summary_use_proj
SCREAMING_SNAKE_CASE_: List[Any] =summary_activation
SCREAMING_SNAKE_CASE_: str =summary_proj_to_labels
SCREAMING_SNAKE_CASE_: int =summary_first_dropout
SCREAMING_SNAKE_CASE_: Any =start_n_top
SCREAMING_SNAKE_CASE_: Dict =end_n_top
SCREAMING_SNAKE_CASE_: Optional[int] =mask_token_id
SCREAMING_SNAKE_CASE_: Any =lang_id
if "n_words" in kwargs:
SCREAMING_SNAKE_CASE_: Tuple =kwargs["""n_words"""]
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , **lowerCAmelCase )
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: Any ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE_: List[Any] ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 36
|
"""simple docstring"""
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =False
while is_sorted is False: # Until all the indices are traversed keep looping
SCREAMING_SNAKE_CASE_: Tuple =True
for i in range(0 , len(lowercase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_: Tuple =False
for i in range(1 , len(lowercase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_: str =False
return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
input_list = [int(x) for x in input().split()]
# reading the list elements from one input line
sorted_list = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
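# --- Added illustrative example (not part of the original script) ---
# Self-contained restatement of odd-even (brick) sort with a quick sanity check; the
# function name below is mine. Each round does one pass over even-indexed pairs and one
# over odd-indexed pairs, repeating until a full round makes no swap.
def brick_sort(values: list) -> list:
    values = list(values)
    is_sorted = False
    while not is_sorted:
        is_sorted = True
        for start in (0, 1):  # even pass, then odd pass
            for i in range(start, len(values) - 1, 2):
                if values[i] > values[i + 1]:
                    values[i], values[i + 1] = values[i + 1], values[i]
                    is_sorted = False
    return values

assert brick_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]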
| 36
| 1
|
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowercase , int(b / 2 ) ) * actual_power(lowercase , int(b / 2 ) )
else:
return a * actual_power(lowercase , int(b / 2 ) ) * actual_power(lowercase , int(b / 2 ) )
def __magic_name__ ( lowercase , lowercase ):
if b < 0:
return 1 / actual_power(lowercase , lowercase )
return actual_power(lowercase , lowercase )
if __name__ == "__main__":
print(power(-2, -3))
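# --- Added illustrative example (not part of the original script) ---
# Worked view of the divide-and-conquer exponentiation above (the helper name is mine):
# 2**10 -> (2**5)**2 -> (2 * (2**2)**2)**2, i.e. O(log b) multiplications instead of b - 1,
# with negative exponents handled by taking the reciprocal at the end.
def fast_pow(a: float, b: int) -> float:
    if b == 0:
        return 1
    half = fast_pow(a, abs(b) // 2)
    result = half * half if b % 2 == 0 else a * half * half
    return result if b > 0 else 1 / result

assert fast_pow(2, 10) == 1024
assert fast_pow(-2, -3) == -0.125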
| 36
|
"""simple docstring"""
def __magic_name__ ( lowercase ):
return str(lowercase ) == str(lowercase )[::-1]
def __magic_name__ ( lowercase ):
return int(lowercase ) + int(str(lowercase )[::-1] )
def __magic_name__ ( lowercase = 1_0000 ):
SCREAMING_SNAKE_CASE_: List[str] =[]
for num in range(1 , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =0
SCREAMING_SNAKE_CASE_: int =num
while iterations < 50:
SCREAMING_SNAKE_CASE_: Optional[Any] =sum_reverse(lowercase )
iterations += 1
if is_palindrome(lowercase ):
break
else:
lychrel_nums.append(lowercase )
return len(lowercase )
if __name__ == "__main__":
print(f"""{solution() = }""")
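# --- Added illustrative example (not part of the original script) ---
# Worked reverse-and-add steps (the helper name below is mine). 47 reaches a palindrome in
# one step (47 + 74 = 121), while 196 is the classic candidate that never seems to, so the
# solution above counts it after 50 fruitless iterations.
def reverse_and_add(n: int) -> int:
    return n + int(str(n)[::-1])

assert reverse_and_add(47) == 121 and str(121) == str(121)[::-1]
steps = [196]
for _ in range(3):
    steps.append(reverse_and_add(steps[-1]))
assert steps == [196, 887, 1675, 7436]  # still no palindrome in sight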
| 36
| 1
|
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __magic_name__ ( lowercase , lowercase = True , lowercase = math.inf , lowercase = -math.inf , lowercase = math.inf , lowercase = -math.inf , lowercase = False , lowercase = 100 , lowercase = 0.01 , lowercase = 1 , ):
SCREAMING_SNAKE_CASE_: int =False
SCREAMING_SNAKE_CASE_: Optional[Any] =search_prob
SCREAMING_SNAKE_CASE_: Optional[Any] =start_temperate
SCREAMING_SNAKE_CASE_: List[str] =[]
SCREAMING_SNAKE_CASE_: str =0
SCREAMING_SNAKE_CASE_: Optional[int] =None
while not search_end:
SCREAMING_SNAKE_CASE_: int =current_state.score()
if best_state is None or current_score > best_state.score():
SCREAMING_SNAKE_CASE_: Optional[int] =current_state
scores.append(lowercase )
iterations += 1
SCREAMING_SNAKE_CASE_: int =None
SCREAMING_SNAKE_CASE_: int =current_state.get_neighbors()
while (
next_state is None and neighbors
): # until we find a neighbor we can move to, or run out of neighbors
SCREAMING_SNAKE_CASE_: Dict =random.randint(0 , len(lowercase ) - 1 ) # picking a random neighbor
SCREAMING_SNAKE_CASE_: int =neighbors.pop(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
SCREAMING_SNAKE_CASE_: Tuple =change * -1 # in case we are finding minimum
if change > 0: # improves the solution
SCREAMING_SNAKE_CASE_: Optional[int] =picked_neighbor
else:
SCREAMING_SNAKE_CASE_: str =(math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
SCREAMING_SNAKE_CASE_: Optional[int] =picked_neighbor
SCREAMING_SNAKE_CASE_: Union[str, Any] =current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
SCREAMING_SNAKE_CASE_: str =True
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowercase ) , lowercase )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
if __name__ == "__main__":
def __magic_name__ ( lowercase , lowercase ):
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
prob = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
local_min = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
f"""and 50 > y > - 5 found via simulated annealing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
prob = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
local_max = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
f"""and 50 > y > - 5 found via simulated annealing: {local_max.score()}"""
)
def __magic_name__ ( lowercase , lowercase ):
return (3 * x**2) - (6 * y)
prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
local_min = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
f"""{local_min.score()}"""
)
prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
local_max = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: """
f"""{local_max.score()}"""
)
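# --- Added illustrative sketch (not part of the original script) ---
# The acceptance rule used above: a worse neighbour (change < 0) is still accepted with
# probability e**(change / T), which shrinks as the temperature T decays. Names are mine.
def acceptance_probability(change: float, temperature: float) -> float:
    return 1.0 if change > 0 else math.e ** (change / temperature)

# A move that worsens the score by 1 is accepted ~37% of the time at T = 1,
# but essentially never once the system has cooled to T = 0.01.
assert abs(acceptance_probability(-1.0, 1.0) - math.exp(-1)) < 1e-12
assert acceptance_probability(-1.0, 0.01) < 1e-40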
| 36
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""feature_extraction_dpt"""] = ["""DPTFeatureExtractor"""]
_import_structure["""image_processing_dpt"""] = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_dpt"""] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_instructblip"""] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class a :
def __init__( self : str ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: list[Any] =[]
SCREAMING_SNAKE_CASE_: int =0
SCREAMING_SNAKE_CASE_: int =0
def lowerCamelCase__ ( self : Optional[Any] ) -> bool:
'''simple docstring'''
return self.head == self.tail
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
self.data.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =self.tail + 1
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.data[self.head]
SCREAMING_SNAKE_CASE_: Optional[int] =self.head + 1
return ret
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.tail - self.head
def lowerCamelCase__ ( self : str ) -> None:
'''simple docstring'''
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =data
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: int =1
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return self.data
def lowerCamelCase__ ( self : List[Any] ) -> MyNode | None:
'''simple docstring'''
return self.left
def lowerCamelCase__ ( self : Dict ) -> MyNode | None:
'''simple docstring'''
return self.right
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
return self.height
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =data
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =node
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =node
def lowerCamelCase__ ( self : int , lowerCAmelCase : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =height
def __magic_name__ ( lowercase ):
if node is None:
return 0
return node.get_height()
def __magic_name__ ( lowercase , lowercase ):
if a > b:
return a
return b
def __magic_name__ ( lowercase ):
print("""left rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: int =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
print("""right rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowercase ) )
return right_rotation(lowercase )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowercase ) )
return left_rotation(lowercase )
def __magic_name__ ( lowercase , lowercase ):
if node is None:
return MyNode(lowercase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowercase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
SCREAMING_SNAKE_CASE_: Union[str, Any] =node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
SCREAMING_SNAKE_CASE_: Any =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: List[Any] =lr_rotation(lowercase )
else:
node.set_right(insert_node(node.get_right() , lowercase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
SCREAMING_SNAKE_CASE_: Tuple =node.get_right()
assert right_child is not None
if data < right_child.get_data():
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =left_rotation(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
return node
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: Dict =root.get_right()
if right_child is None:
break
SCREAMING_SNAKE_CASE_: str =right_child
return root.get_data()
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: str =root.get_left()
if left_child is None:
break
SCREAMING_SNAKE_CASE_: Dict =left_child
return root.get_data()
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str =root.get_left()
SCREAMING_SNAKE_CASE_: List[Any] =root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_left_most(lowercase )
root.set_data(lowercase )
root.set_right(del_node(lowercase , lowercase ) )
elif left_child is not None:
SCREAMING_SNAKE_CASE_: Optional[int] =left_child
elif right_child is not None:
SCREAMING_SNAKE_CASE_: Any =right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(lowercase , lowercase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowercase , lowercase ) )
if get_height(lowercase ) - get_height(lowercase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
SCREAMING_SNAKE_CASE_: Tuple =left_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
elif get_height(lowercase ) - get_height(lowercase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
SCREAMING_SNAKE_CASE_: Optional[Any] =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: str =lr_rotation(lowercase )
SCREAMING_SNAKE_CASE_: str =my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowercase )
return root
class a :
def __init__( self : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: MyNode | None =None
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
return get_height(self.root )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""insert:""" + str(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Tuple =insert_node(self.root , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""delete:""" + str(lowerCAmelCase ) )
if self.root is None:
print("""Tree is empty!""" )
return
SCREAMING_SNAKE_CASE_: Union[str, Any] =del_node(self.root , lowerCAmelCase )
def __str__( self : List[str] , ) -> str: # a level-order traversal gives a more intuitive look at the tree
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =""""""
SCREAMING_SNAKE_CASE_: str =MyQueue()
q.push(self.root )
SCREAMING_SNAKE_CASE_: List[str] =self.get_height()
if layer == 0:
return output
SCREAMING_SNAKE_CASE_: int =0
while not q.is_empty():
SCREAMING_SNAKE_CASE_: int =q.pop()
SCREAMING_SNAKE_CASE_: List[Any] =""" """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(lowerCAmelCase )
q.push(lowerCAmelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
SCREAMING_SNAKE_CASE_: List[Any] =cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , lowerCAmelCase ) - 1:
SCREAMING_SNAKE_CASE_: int =layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __magic_name__ ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
t = AVLtree()
lst = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
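# --- Added illustrative sketch (not part of the original script) ---
# A minimal right rotation on plain tuples, showing how a left-left imbalance (a new key
# inserted under the left child of the left child) is repaired. A node here is
# (key, left, right); the names below are mine, not the classes defined above.
def tiny_right_rotation(node):
    key, left, right = node
    l_key, l_left, l_right = left
    return (l_key, l_left, (key, l_right, right))

# Inserting 3, 2, 1 naively gives the left-leaning chain 3 -> 2 -> 1; one right rotation
# at the root rebalances it to 2 with children 1 and 3.
unbalanced = (3, (2, (1, None, None), None), None)
assert tiny_right_rotation(unbalanced) == (2, (1, None, None), (3, None, None))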
| 36
| 1
|
"""simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
OPTS = None
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Dict =argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" )
parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" )
parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" )
parser.add_argument(
"""--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" )
parser.add_argument(
"""--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" )
parser.add_argument(
"""--na-prob-thresh""" , """-t""" , type=lowercase , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , )
parser.add_argument(
"""--out-image-dir""" , """-p""" , metavar="""out_images""" , default=lowercase , help="""Save precision-recall curves to directory.""" )
parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str ={}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
SCREAMING_SNAKE_CASE_: Dict =bool(qa["""answers"""]["""text"""] )
return qid_to_has_ans
def __magic_name__ ( lowercase ):
def remove_articles(lowercase ):
return ARTICLES_REGEX.sub(""" """ , lowercase )
def white_space_fix(lowercase ):
return " ".join(text.split() )
def remove_punc(lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(lowercase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowercase ) ) ) )
def __magic_name__ ( lowercase ):
if not s:
return []
return normalize_answer(lowercase ).split()
def __magic_name__ ( lowercase , lowercase ):
return int(normalize_answer(lowercase ) == normalize_answer(lowercase ) )
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =get_tokens(lowercase )
SCREAMING_SNAKE_CASE_: str =get_tokens(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =collections.Counter(lowercase ) & collections.Counter(lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =sum(common.values() )
if len(lowercase ) == 0 or len(lowercase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
SCREAMING_SNAKE_CASE_: Optional[int] =1.0 * num_same / len(lowercase )
SCREAMING_SNAKE_CASE_: Any =1.0 * num_same / len(lowercase )
SCREAMING_SNAKE_CASE_: str =(2 * precision * recall) / (precision + recall)
return fa
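# --- Added illustrative example (not part of the original script) ---
# Worked token-overlap F1, mirroring the bag-of-tokens computation above on a concrete
# pair (answer normalization is skipped here; the variable names are mine).
_gold = "the cat sat on the mat".split()
_pred = "the cat is on a mat".split()
_common = collections.Counter(_gold) & collections.Counter(_pred)  # the, cat, on, mat
_num_same = sum(_common.values())  # 4
_precision = _num_same / len(_pred)  # 4 / 6
_recall = _num_same / len(_gold)  # 4 / 6
_f1 = 2 * _precision * _recall / (_precision + _recall)
assert abs(_f1 - 2 / 3) < 1e-12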
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict ={}
SCREAMING_SNAKE_CASE_: List[Any] ={}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
SCREAMING_SNAKE_CASE_: Union[str, Any] =qa["""id"""]
SCREAMING_SNAKE_CASE_: Tuple =[t for t in qa["""answers"""]["""text"""] if normalize_answer(lowercase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
SCREAMING_SNAKE_CASE_: Optional[int] =[""""""]
if qid not in preds:
print(f'''Missing prediction for {qid}''' )
continue
SCREAMING_SNAKE_CASE_: Dict =preds[qid]
# Take max over all gold answers
SCREAMING_SNAKE_CASE_: Tuple =max(compute_exact(lowercase , lowercase ) for a in gold_answers )
SCREAMING_SNAKE_CASE_: Tuple =max(compute_fa(lowercase , lowercase ) for a in gold_answers )
return exact_scores, fa_scores
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] ={}
for qid, s in scores.items():
SCREAMING_SNAKE_CASE_: str =na_probs[qid] > na_prob_thresh
if pred_na:
SCREAMING_SNAKE_CASE_: Tuple =float(not qid_to_has_ans[qid] )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =s
return new_scores
def __magic_name__ ( lowercase , lowercase , lowercase=None ):
if not qid_list:
SCREAMING_SNAKE_CASE_: Dict =len(lowercase )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores.values() ) / total),
("""f1""", 100.0 * sum(fa_scores.values() ) / total),
("""total""", total),
] )
else:
SCREAMING_SNAKE_CASE_: Dict =len(lowercase )
return collections.OrderedDict(
[
("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
("""total""", total),
] )
def __magic_name__ ( lowercase , lowercase , lowercase ):
for k in new_eval:
SCREAMING_SNAKE_CASE_: Tuple =new_eval[k]
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
plt.step(lowercase , lowercase , color="""b""" , alpha=0.2 , where="""post""" )
plt.fill_between(lowercase , lowercase , step="""post""" , alpha=0.2 , color="""b""" )
plt.xlabel("""Recall""" )
plt.ylabel("""Precision""" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(lowercase )
plt.savefig(lowercase )
plt.clf()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase=None , lowercase=None ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =sorted(lowercase , key=lambda lowercase : na_probs[k] )
SCREAMING_SNAKE_CASE_: List[Any] =0.0
SCREAMING_SNAKE_CASE_: Dict =1.0
SCREAMING_SNAKE_CASE_: Tuple =0.0
SCREAMING_SNAKE_CASE_: Any =[1.0]
SCREAMING_SNAKE_CASE_: str =[0.0]
SCREAMING_SNAKE_CASE_: Optional[int] =0.0
for i, qid in enumerate(lowercase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
SCREAMING_SNAKE_CASE_: List[Any] =true_pos / float(i + 1 )
SCREAMING_SNAKE_CASE_: List[Any] =true_pos / float(lowercase )
if i == len(lowercase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(lowercase )
recalls.append(lowercase )
if out_image:
plot_pr_curve(lowercase , lowercase , lowercase , lowercase )
return {"ap": 100.0 * avg_prec}
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
if out_image_dir and not os.path.exists(lowercase ):
os.makedirs(lowercase )
SCREAMING_SNAKE_CASE_: Dict =sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
SCREAMING_SNAKE_CASE_: Optional[int] =make_precision_recall_eval(
lowercase , lowercase , lowercase , lowercase , out_image=os.path.join(lowercase , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , )
SCREAMING_SNAKE_CASE_: Dict =make_precision_recall_eval(
lowercase , lowercase , lowercase , lowercase , out_image=os.path.join(lowercase , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , )
SCREAMING_SNAKE_CASE_: Any ={k: float(lowercase ) for k, v in qid_to_has_ans.items()}
SCREAMING_SNAKE_CASE_: str =make_precision_recall_eval(
lowercase , lowercase , lowercase , lowercase , out_image=os.path.join(lowercase , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""" , )
merge_eval(lowercase , lowercase , """pr_exact""" )
merge_eval(lowercase , lowercase , """pr_f1""" )
merge_eval(lowercase , lowercase , """pr_oracle""" )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
if not qid_list:
return
SCREAMING_SNAKE_CASE_: Union[str, Any] =[na_probs[k] for k in qid_list]
SCREAMING_SNAKE_CASE_: Any =np.ones_like(lowercase ) / float(len(lowercase ) )
plt.hist(lowercase , weights=lowercase , bins=20 , range=(0.0, 1.0) )
plt.xlabel("""Model probability of no-answer""" )
plt.ylabel("""Proportion of dataset""" )
plt.title(f'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(lowercase , f'''na_prob_hist_{name}.png''' ) )
plt.clf()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
SCREAMING_SNAKE_CASE_: str =num_no_ans
SCREAMING_SNAKE_CASE_: Dict =cur_score
SCREAMING_SNAKE_CASE_: Optional[Any] =0.0
SCREAMING_SNAKE_CASE_: Dict =sorted(lowercase , key=lambda lowercase : na_probs[k] )
for i, qid in enumerate(lowercase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
SCREAMING_SNAKE_CASE_: Dict =scores[qid]
else:
if preds[qid]:
SCREAMING_SNAKE_CASE_: Union[str, Any] =-1
else:
SCREAMING_SNAKE_CASE_: Tuple =0
cur_score += diff
if cur_score > best_score:
SCREAMING_SNAKE_CASE_: int =cur_score
SCREAMING_SNAKE_CASE_: int =na_probs[qid]
return 100.0 * best_score / len(lowercase ), best_thresh
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =find_best_thresh(lowercase , lowercase , lowercase , lowercase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =find_best_thresh(lowercase , lowercase , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: int =best_exact
SCREAMING_SNAKE_CASE_: Any =exact_thresh
SCREAMING_SNAKE_CASE_: str =best_fa
SCREAMING_SNAKE_CASE_: Any =fa_thresh
def __magic_name__ ( ):
with open(OPTS.data_file ) as f:
SCREAMING_SNAKE_CASE_: Any =json.load(lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =dataset_json["""data"""]
with open(OPTS.pred_file ) as f:
SCREAMING_SNAKE_CASE_: List[Any] =json.load(lowercase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
SCREAMING_SNAKE_CASE_: Union[str, Any] =json.load(lowercase )
else:
SCREAMING_SNAKE_CASE_: int ={k: 0.0 for k in preds}
SCREAMING_SNAKE_CASE_: List[str] =make_qid_to_has_ans(lowercase ) # maps qid to True/False
SCREAMING_SNAKE_CASE_: str =[k for k, v in qid_to_has_ans.items() if v]
SCREAMING_SNAKE_CASE_: Optional[Any] =[k for k, v in qid_to_has_ans.items() if not v]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =get_raw_scores(lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Any =apply_no_ans_threshold(lowercase , lowercase , lowercase , OPTS.na_prob_thresh )
SCREAMING_SNAKE_CASE_: Optional[int] =apply_no_ans_threshold(lowercase , lowercase , lowercase , OPTS.na_prob_thresh )
SCREAMING_SNAKE_CASE_: Optional[Any] =make_eval_dict(lowercase , lowercase )
if has_ans_qids:
SCREAMING_SNAKE_CASE_: int =make_eval_dict(lowercase , lowercase , qid_list=lowercase )
merge_eval(lowercase , lowercase , """HasAns""" )
if no_ans_qids:
SCREAMING_SNAKE_CASE_: Union[str, Any] =make_eval_dict(lowercase , lowercase , qid_list=lowercase )
merge_eval(lowercase , lowercase , """NoAns""" )
if OPTS.na_prob_file:
find_all_best_thresh(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(lowercase , lowercase , lowercase , lowercase , lowercase , OPTS.out_image_dir )
histogram_na_prob(lowercase , lowercase , OPTS.out_image_dir , """hasAns""" )
histogram_na_prob(lowercase , lowercase , OPTS.out_image_dir , """noAns""" )
if OPTS.out_file:
with open(OPTS.out_file , """w""" ) as f:
json.dump(lowercase , lowercase )
else:
print(json.dumps(lowercase , indent=2 ) )
if __name__ == "__main__":
OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("""Agg""")
import matplotlib.pyplot as plt
main()
| 36
|
"""simple docstring"""
import string
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =""""""
for i in sequence:
SCREAMING_SNAKE_CASE_: List[Any] =ord(lowercase )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =string.ascii_letters
SCREAMING_SNAKE_CASE_: Tuple =string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowercase )] if c in letters else c for c in sequence )
def __magic_name__ ( ):
from timeit import timeit
print("""Running performance benchmarks...""" )
SCREAMING_SNAKE_CASE_: int ="""from string import printable ; from __main__ import atbash, atbash_slow"""
print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowercase )} seconds''' )
print(f'''> atbash(): {timeit("atbash(printable)" , setup=lowercase )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
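# --- Added illustrative example (not part of the original script) ---
# The two magic constants above come from ord('A') + ord('Z') == 155 and
# ord('a') + ord('z') == 219, so chr(155 - ord(c)) mirrors an uppercase letter and
# chr(219 - ord(c)) mirrors a lowercase one. The helper name below is mine.
assert ord("A") + ord("Z") == 155 and ord("a") + ord("z") == 219

def atbash_char(c: str) -> str:
    if c.isupper():
        return chr(155 - ord(c))
    if c.islower():
        return chr(219 - ord(c))
    return c

assert chr(155 - ord("A")) == "Z" and chr(219 - ord("b")) == "y"
assert "".join(atbash_char(c) for c in "Hello_123") == "Svool_123"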
| 36
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class a ( UpperCAmelCase__ ):
UpperCamelCase : torch.FloatTensor
class a ( UpperCAmelCase__ , UpperCAmelCase__ ):
@register_to_config
def __init__( self : int , lowerCAmelCase : int = 6_5536 , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : int = 2 , lowerCAmelCase : int = 2 , lowerCAmelCase : int = 0 , lowerCAmelCase : str = "fourier" , lowerCAmelCase : bool = True , lowerCAmelCase : bool = False , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCAmelCase : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCAmelCase : Tuple[str] = "UNetMidBlock1D" , lowerCAmelCase : str = None , lowerCAmelCase : Tuple[int] = (32, 32, 64) , lowerCAmelCase : str = None , lowerCAmelCase : int = 8 , lowerCAmelCase : int = 1 , lowerCAmelCase : bool = False , ) -> List[str]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: Union[str, Any] =sample_size
# time
if time_embedding_type == "fourier":
SCREAMING_SNAKE_CASE_: Tuple =GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCAmelCase , log=lowerCAmelCase , flip_sin_to_cos=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =2 * block_out_channels[0]
elif time_embedding_type == "positional":
SCREAMING_SNAKE_CASE_: int =Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCAmelCase , downscale_freq_shift=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =block_out_channels[0]
if use_timestep_embedding:
SCREAMING_SNAKE_CASE_: Union[str, Any] =block_out_channels[0] * 4
SCREAMING_SNAKE_CASE_: Union[str, Any] =TimestepEmbedding(
in_channels=lowerCAmelCase , time_embed_dim=lowerCAmelCase , act_fn=lowerCAmelCase , out_dim=block_out_channels[0] , )
SCREAMING_SNAKE_CASE_: Any =nn.ModuleList([] )
SCREAMING_SNAKE_CASE_: Dict =None
SCREAMING_SNAKE_CASE_: Tuple =nn.ModuleList([] )
SCREAMING_SNAKE_CASE_: Optional[Any] =None
# down
SCREAMING_SNAKE_CASE_: str =in_channels
for i, down_block_type in enumerate(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[int] =output_channel
SCREAMING_SNAKE_CASE_: List[str] =block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
SCREAMING_SNAKE_CASE_: Optional[Any] =i == len(lowerCAmelCase ) - 1
SCREAMING_SNAKE_CASE_: Tuple =get_down_block(
lowerCAmelCase , num_layers=lowerCAmelCase , in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCAmelCase )
# mid
SCREAMING_SNAKE_CASE_: Optional[Any] =get_mid_block(
lowerCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCAmelCase , add_downsample=lowerCAmelCase , )
# up
SCREAMING_SNAKE_CASE_: List[str] =list(reversed(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Tuple =reversed_block_out_channels[0]
if out_block_type is None:
SCREAMING_SNAKE_CASE_: Tuple =out_channels
else:
SCREAMING_SNAKE_CASE_: Tuple =block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Dict =output_channel
SCREAMING_SNAKE_CASE_: Optional[Any] =(
reversed_block_out_channels[i + 1] if i < len(lowerCAmelCase ) - 1 else final_upsample_channels
)
SCREAMING_SNAKE_CASE_: Dict =i == len(lowerCAmelCase ) - 1
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_up_block(
lowerCAmelCase , num_layers=lowerCAmelCase , in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =output_channel
# out
SCREAMING_SNAKE_CASE_: Union[str, Any] =norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
SCREAMING_SNAKE_CASE_: Optional[int] =get_out_block(
out_block_type=lowerCAmelCase , num_groups_out=lowerCAmelCase , embed_dim=block_out_channels[0] , out_channels=lowerCAmelCase , act_fn=lowerCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : Union[torch.Tensor, float, int] , lowerCAmelCase : bool = True , ) -> Union[UNetaDOutput, Tuple]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =timestep
if not torch.is_tensor(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE_: Union[str, Any] =timesteps[None].to(sample.device )
SCREAMING_SNAKE_CASE_: int =self.time_proj(lowerCAmelCase )
if self.config.use_timestep_embedding:
SCREAMING_SNAKE_CASE_: Dict =self.time_mlp(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =timestep_embed[..., None]
SCREAMING_SNAKE_CASE_: List[str] =timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
SCREAMING_SNAKE_CASE_: Dict =timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
SCREAMING_SNAKE_CASE_: str =()
for downsample_block in self.down_blocks:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =downsample_block(hidden_states=lowerCAmelCase , temb=lowerCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
SCREAMING_SNAKE_CASE_: List[Any] =self.mid_block(lowerCAmelCase , lowerCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
SCREAMING_SNAKE_CASE_: Dict =down_block_res_samples[-1:]
SCREAMING_SNAKE_CASE_: int =down_block_res_samples[:-1]
SCREAMING_SNAKE_CASE_: Union[str, Any] =upsample_block(lowerCAmelCase , res_hidden_states_tuple=lowerCAmelCase , temb=lowerCAmelCase )
# 5. post-process
if self.out_block:
SCREAMING_SNAKE_CASE_: Any =self.out_block(lowerCAmelCase , lowerCAmelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCAmelCase )
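# --- Added illustrative sketch (not part of the original module) ---
# Generic sinusoidal timestep features of the kind the "positional" time-embedding branch
# above relies on. This is NOT the exact diffusers implementation (nor the Fourier
# projection variant); the function name and the 10000 frequency base are assumptions.
import math

def sinusoidal_timestep_embedding(timesteps: torch.Tensor, dim: int) -> torch.Tensor:
    half = dim // 2
    freqs = torch.exp(-math.log(10000.0) * torch.arange(half, dtype=torch.float32) / half)
    args = timesteps[:, None].float() * freqs[None, :]
    return torch.cat([torch.sin(args), torch.cos(args)], dim=-1)  # (batch, dim)

if __name__ == "__main__":
    emb = sinusoidal_timestep_embedding(torch.tensor([0, 10, 999]), dim=32)
    assert emb.shape == (3, 32)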
| 36
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=3 , lowerCAmelCase : Optional[Any]=64 , lowerCAmelCase : Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =np.random.default_rng(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =length
SCREAMING_SNAKE_CASE_: Union[str, Any] =rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_: Tuple =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ) -> str:
'''simple docstring'''
return self.length
def __getitem__( self : Union[str, Any] , lowerCAmelCase : Any ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : str=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Optional[int]=False ) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: Dict =True
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tuple=None ) -> int:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
def __init__( self : Union[str, Any] , lowerCAmelCase : Any=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : List[Any]=False ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[str] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: List[Any] =True
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : int=None ) -> Any:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Optional[int] =False
return x * self.a + self.b
def __magic_name__ ( lowercase , lowercase = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE_: Optional[int] ={"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
SCREAMING_SNAKE_CASE_: Any =load_dataset("""csv""" , data_files=lowercase )
SCREAMING_SNAKE_CASE_: Any =datasets["""train"""].unique("""label""" )
SCREAMING_SNAKE_CASE_: List[Any] ={v: i for i, v in enumerate(lowercase )}
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Dict =tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase , padding="""max_length""" )
if "label" in examples:
SCREAMING_SNAKE_CASE_: Optional[int] =[label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_: List[Any] =datasets.map(
lowercase , batched=lowercase , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(tokenized_datasets["""train"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=2 )
SCREAMING_SNAKE_CASE_: Dict =DataLoader(tokenized_datasets["""validation"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=1 )
return train_dataloader, eval_dataloader
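# --- Added illustrative sketch (not part of the original script) ---
# The collate_fn above pads to a fixed 128 tokens on TPU (static shapes compile once) but
# only to the longest sequence in the batch elsewhere. A tokenizer-free sketch of that
# trade-off, with names that are mine:
def pad_batch(batch, pad_id=0, fixed_length=None):
    target = fixed_length if fixed_length is not None else max(len(seq) for seq in batch)
    return [seq + [pad_id] * (target - len(seq)) for seq in batch]

assert pad_batch([[1, 2, 3], [4]]) == [[1, 2, 3], [4, 0, 0]]  # pad to longest
assert pad_batch([[1, 2, 3], [4]], fixed_length=5)[1] == [4, 0, 0, 0, 0]  # pad to max_length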
| 36
| 1
|
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_7_0)
new_layer_name_dict = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
REMOTE_MODEL_PATHS = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
_UpperCAmelCase = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("""~"""), """.cache""")
CACHE_DIR = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def __magic_name__ ( lowercase , lowercase=False ):
SCREAMING_SNAKE_CASE_: str =model_type
if use_small:
key += "_small"
return os.path.join(lowercase , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def __magic_name__ ( lowercase , lowercase ):
os.makedirs(lowercase , exist_ok=lowercase )
hf_hub_download(repo_id=lowercase , filename=lowercase , local_dir=lowercase )
def __magic_name__ ( lowercase , lowercase , lowercase=False , lowercase="text" ):
if model_type == "text":
SCREAMING_SNAKE_CASE_: Dict =BarkSemanticModel
SCREAMING_SNAKE_CASE_: Any =BarkSemanticConfig
SCREAMING_SNAKE_CASE_: Union[str, Any] =BarkSemanticGenerationConfig
elif model_type == "coarse":
SCREAMING_SNAKE_CASE_: str =BarkCoarseModel
SCREAMING_SNAKE_CASE_: Optional[Any] =BarkCoarseConfig
SCREAMING_SNAKE_CASE_: List[str] =BarkCoarseGenerationConfig
elif model_type == "fine":
SCREAMING_SNAKE_CASE_: int =BarkFineModel
SCREAMING_SNAKE_CASE_: Dict =BarkFineConfig
SCREAMING_SNAKE_CASE_: Dict =BarkFineGenerationConfig
else:
raise NotImplementedError()
SCREAMING_SNAKE_CASE_: Tuple =f'''{model_type}_small''' if use_small else model_type
SCREAMING_SNAKE_CASE_: Optional[Any] =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowercase ):
logger.info(f'''{model_type} model not found, downloading into `{CACHE_DIR}`.''' )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
SCREAMING_SNAKE_CASE_: Dict =torch.load(lowercase , map_location=lowercase )
# this is a hack
SCREAMING_SNAKE_CASE_: str =checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
SCREAMING_SNAKE_CASE_: Union[str, Any] =model_args["""vocab_size"""]
SCREAMING_SNAKE_CASE_: Optional[int] =model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
SCREAMING_SNAKE_CASE_: Optional[Any] =model_args.pop("""n_head""" )
SCREAMING_SNAKE_CASE_: Optional[int] =model_args.pop("""n_embd""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =model_args.pop("""n_layer""" )
SCREAMING_SNAKE_CASE_: str =ConfigClass(**checkpoint["""model_args"""] )
SCREAMING_SNAKE_CASE_: Any =ModelClass(config=lowercase )
SCREAMING_SNAKE_CASE_: str =GenerationConfigClass()
SCREAMING_SNAKE_CASE_: List[str] =model_generation_config
SCREAMING_SNAKE_CASE_: Any =checkpoint["""model"""]
# fixup checkpoint
SCREAMING_SNAKE_CASE_: Dict ="""_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(lowercase ):
# replace part of the key with corresponding layer name in HF implementation
SCREAMING_SNAKE_CASE_: List[Any] =k[len(lowercase ) :]
for old_layer_name in new_layer_name_dict:
SCREAMING_SNAKE_CASE_: str =new_k.replace(lowercase , new_layer_name_dict[old_layer_name] )
SCREAMING_SNAKE_CASE_: Optional[int] =state_dict.pop(lowercase )
SCREAMING_SNAKE_CASE_: Dict =set(state_dict.keys() ) - set(model.state_dict().keys() )
SCREAMING_SNAKE_CASE_: List[str] ={k for k in extra_keys if not k.endswith(""".attn.bias""" )}
SCREAMING_SNAKE_CASE_: str =set(model.state_dict().keys() ) - set(state_dict.keys() )
SCREAMING_SNAKE_CASE_: Tuple ={k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(lowercase ) != 0:
raise ValueError(f'''extra keys found: {extra_keys}''' )
if len(lowercase ) != 0:
raise ValueError(f'''missing keys: {missing_keys}''' )
model.load_state_dict(lowercase , strict=lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =model.num_parameters(exclude_embeddings=lowercase )
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""best_val_loss"""].item()
logger.info(f'''model loaded: {round(n_params/1e6 , 1 )}M params, {round(lowercase , 3 )} loss''' )
model.eval()
model.to(lowercase )
del checkpoint, state_dict
return model
def __magic_name__ ( lowercase , lowercase=False , lowercase="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
SCREAMING_SNAKE_CASE_: Optional[int] ="""cpu""" # do conversion on cpu
SCREAMING_SNAKE_CASE_: Union[str, Any] =_get_ckpt_path(lowercase , use_small=lowercase )
SCREAMING_SNAKE_CASE_: Dict =_load_model(lowercase , lowercase , model_type=lowercase , use_small=lowercase )
# load bark initial model
SCREAMING_SNAKE_CASE_: Union[str, Any] =_bark_load_model(lowercase , """cpu""" , model_type=lowercase , use_small=lowercase )
if model_type == "text":
SCREAMING_SNAKE_CASE_: Any =bark_model["""model"""]
if model.num_parameters(exclude_embeddings=lowercase ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
SCREAMING_SNAKE_CASE_: List[str] =5
SCREAMING_SNAKE_CASE_: Any =10
if model_type in ["text", "coarse"]:
SCREAMING_SNAKE_CASE_: Optional[int] =torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
SCREAMING_SNAKE_CASE_: Dict =bark_model(lowercase )[0]
SCREAMING_SNAKE_CASE_: Optional[Any] =model(lowercase )
# take last logits
SCREAMING_SNAKE_CASE_: List[str] =output_new_model_total.logits[:, [-1], :]
else:
SCREAMING_SNAKE_CASE_: str =3
SCREAMING_SNAKE_CASE_: Any =8
SCREAMING_SNAKE_CASE_: Tuple =torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
SCREAMING_SNAKE_CASE_: Optional[int] =model(lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =bark_model(lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =output_new_model_total.logits
# any remaining output difference should come only from differences in the self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =os.path.join(lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =BarkSemanticConfig.from_pretrained(os.path.join(lowercase , """config.json""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] =BarkCoarseConfig.from_pretrained(os.path.join(lowercase , """config.json""" ) )
SCREAMING_SNAKE_CASE_: Tuple =BarkFineConfig.from_pretrained(os.path.join(lowercase , """config.json""" ) )
SCREAMING_SNAKE_CASE_: Optional[Any] =EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
SCREAMING_SNAKE_CASE_: int =BarkSemanticModel.from_pretrained(lowercase )
SCREAMING_SNAKE_CASE_: Any =BarkCoarseModel.from_pretrained(lowercase )
SCREAMING_SNAKE_CASE_: int =BarkFineModel.from_pretrained(lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =BarkConfig.from_sub_model_configs(
lowercase , lowercase , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
SCREAMING_SNAKE_CASE_: List[Any] =BarkModel(lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =semantic
SCREAMING_SNAKE_CASE_: int =coarseAcoustic
SCREAMING_SNAKE_CASE_: str =fineAcoustic
SCREAMING_SNAKE_CASE_: List[Any] =codec
SCREAMING_SNAKE_CASE_: str =bark_generation_config
Path(lowercase ).mkdir(exist_ok=lowercase )
bark.save_pretrained(lowercase , repo_id=lowercase , push_to_hub=lowercase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
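# --- Added illustrative example (not part of the original script) ---
# The checkpoint fix-up above strips a compile prefix and renames GPT-style layer names to
# their HF counterparts. A toy version of that remapping (the dictionary contents here are
# illustrative, not the real checkpoint):
_prefix = "_orig_mod."
_rename = {"c_attn": "att_proj", "transformer.": "", "h.": "layers."}

_old_state = {"_orig_mod.transformer.h.0.attn.c_attn.weight": "w"}
_new_state = {}
for _key, _value in _old_state.items():
    if _key.startswith(_prefix):
        _key = _key[len(_prefix):]
    for _old, _new in _rename.items():
        _key = _key.replace(_old, _new)
    _new_state[_key] = _value

assert _new_state == {"layers.0.attn.att_proj.weight": "w"}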
| 36
|
"""simple docstring"""
def __magic_name__ ( lowercase ):
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
SCREAMING_SNAKE_CASE_: Tuple =[0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
SCREAMING_SNAKE_CASE_: Any =1
if upper_limit > 0:
SCREAMING_SNAKE_CASE_: List[str] =1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowercase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 36
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {"""vocab_file""": """spm_char.model"""}
_UpperCAmelCase = {
"""vocab_file""": {
"""microsoft/speecht5_asr""": """https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model""",
"""microsoft/speecht5_tts""": """https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model""",
"""microsoft/speecht5_vc""": """https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model""",
}
}
_UpperCAmelCase = {
"""microsoft/speecht5_asr""": 1_0_2_4,
"""microsoft/speecht5_tts""": 1_0_2_4,
"""microsoft/speecht5_vc""": 1_0_2_4,
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Tuple = ['input_ids', 'attention_mask']
def __init__( self : str , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : Any="<unk>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : Dict , ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: str =vocab_file
SCREAMING_SNAKE_CASE_: str =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase )
@property
def lowerCamelCase__ ( self : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] ={self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.__dict__.copy()
SCREAMING_SNAKE_CASE_: Optional[Any] =None
return state
def __setstate__( self : Union[str, Any] , lowerCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
SCREAMING_SNAKE_CASE_: Union[str, Any] ={}
SCREAMING_SNAKE_CASE_: Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCAmelCase , out_type=lowerCAmelCase )
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : str ) -> Tuple:
'''simple docstring'''
return self.sp_model.piece_to_id(lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =self.sp_model.IdToPiece(lowerCAmelCase )
return token
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =[]
SCREAMING_SNAKE_CASE_: Any =""""""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCAmelCase ) + token
SCREAMING_SNAKE_CASE_: Dict =[]
else:
current_sub_tokens.append(lowerCAmelCase )
out_string += self.sp_model.decode(lowerCAmelCase )
return out_string.strip()
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : str=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
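    # Example (sketch): for a single sequence the method above only appends the
    # EOS id, e.g. [5, 6, 7] -> [5, 6, 7, eos_token_id]; for a pair it
    # concatenates both sequences and appends a single EOS at the very end.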
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =[1]
if token_ids_a is None:
return ([0] * len(lowerCAmelCase )) + suffix_ones
return ([0] * len(lowerCAmelCase )) + ([0] * len(lowerCAmelCase )) + suffix_ones
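    # Example (sketch): for a single sequence of three tokens (and no special
    # tokens already present) the mask is [0, 0, 0, 1] -- zeros for regular
    # tokens and a trailing 1 for the EOS appended above.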
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE_: int =os.path.join(
lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase , """wb""" ) as fi:
SCREAMING_SNAKE_CASE_: Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase )
return (out_vocab_file,)
| 36
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_UpperCAmelCase = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'albert'
def __init__( self : Dict , lowerCAmelCase : List[str]=3_0000 , lowerCAmelCase : List[Any]=128 , lowerCAmelCase : List[str]=4096 , lowerCAmelCase : str=12 , lowerCAmelCase : str=1 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Dict=1_6384 , lowerCAmelCase : int=1 , lowerCAmelCase : str="gelu_new" , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : List[Any]="absolute" , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[int]=3 , **lowerCAmelCase : int , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =embedding_size
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_hidden_groups
SCREAMING_SNAKE_CASE_: List[Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: List[Any] =inner_group_num
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_act
SCREAMING_SNAKE_CASE_: int =intermediate_size
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int =max_position_embeddings
SCREAMING_SNAKE_CASE_: Any =type_vocab_size
SCREAMING_SNAKE_CASE_: int =initializer_range
SCREAMING_SNAKE_CASE_: List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE_: Dict =classifier_dropout_prob
SCREAMING_SNAKE_CASE_: int =position_embedding_type
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 36
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Optional[Any] = 'decision_transformer'
UpperCamelCase : Tuple = ['past_key_values']
UpperCamelCase : Optional[int] = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : int , lowerCAmelCase : Tuple=17 , lowerCAmelCase : List[Any]=4 , lowerCAmelCase : Tuple=128 , lowerCAmelCase : Optional[Any]=4096 , lowerCAmelCase : Any=True , lowerCAmelCase : int=1 , lowerCAmelCase : str=1024 , lowerCAmelCase : Optional[Any]=3 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[Any]="relu" , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : int=1E-5 , lowerCAmelCase : Union[str, Any]=0.0_2 , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Any=True , lowerCAmelCase : Optional[int]=5_0256 , lowerCAmelCase : str=5_0256 , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : Tuple=False , **lowerCAmelCase : str , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =state_dim
SCREAMING_SNAKE_CASE_: List[str] =act_dim
SCREAMING_SNAKE_CASE_: Any =hidden_size
SCREAMING_SNAKE_CASE_: Dict =max_ep_len
SCREAMING_SNAKE_CASE_: Any =action_tanh
SCREAMING_SNAKE_CASE_: str =vocab_size
SCREAMING_SNAKE_CASE_: Optional[Any] =n_positions
SCREAMING_SNAKE_CASE_: str =n_layer
SCREAMING_SNAKE_CASE_: List[str] =n_head
SCREAMING_SNAKE_CASE_: Tuple =n_inner
SCREAMING_SNAKE_CASE_: Any =activation_function
SCREAMING_SNAKE_CASE_: Optional[int] =resid_pdrop
SCREAMING_SNAKE_CASE_: Optional[Any] =embd_pdrop
SCREAMING_SNAKE_CASE_: Any =attn_pdrop
SCREAMING_SNAKE_CASE_: List[str] =layer_norm_epsilon
SCREAMING_SNAKE_CASE_: int =initializer_range
SCREAMING_SNAKE_CASE_: Any =scale_attn_weights
SCREAMING_SNAKE_CASE_: Any =use_cache
SCREAMING_SNAKE_CASE_: Dict =scale_attn_by_inverse_layer_idx
SCREAMING_SNAKE_CASE_: Union[str, Any] =reorder_and_upcast_attn
SCREAMING_SNAKE_CASE_: Tuple =bos_token_id
SCREAMING_SNAKE_CASE_: List[Any] =eos_token_id
super().__init__(bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
| 36
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a ( yaml.SafeLoader ):
def lowerCamelCase__ ( self : int , lowerCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =[self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_: Any =[tuple(lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else key for key in keys]
SCREAMING_SNAKE_CASE_: Dict =Counter(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =[key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=False ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =super().construct_mapping(lowerCAmelCase , deep=lowerCAmelCase )
self._check_no_duplicates_on_constructed_node(lowerCAmelCase )
return mapping
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE_: Union[str, Any] =full_content[1:].index("""---""" ) + 1
SCREAMING_SNAKE_CASE_: List[str] ="""\n""".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowercase )
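# Illustrative sketch (hypothetical README content): for a file that starts with
#     ---
#     license: mit
#     ---
#     # My dataset
# the helper above returns the YAML block ("license: mit") and the remaining
# markdown body ("# My dataset") as two separate strings.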
class a ( UpperCAmelCase__ ):
# class attributes
UpperCamelCase : Tuple = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def lowerCamelCase__ ( cls : List[Any] , lowerCAmelCase : Path ) -> "DatasetMetadata":
'''simple docstring'''
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =_split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(lowerCAmelCase )
else:
return cls()
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Path ) -> List[str]:
'''simple docstring'''
if path.exists():
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_: str =readme_file.read()
else:
SCREAMING_SNAKE_CASE_: str =None
SCREAMING_SNAKE_CASE_: Tuple =self._to_readme(lowerCAmelCase )
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as readme_file:
readme_file.write(lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Optional[str] = None ) -> str:
'''simple docstring'''
if readme_content is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =_split_yaml_from_readme(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] ="""---\n""" + self.to_yaml_string() + """---\n""" + content
else:
SCREAMING_SNAKE_CASE_: List[Any] ="""---\n""" + self.to_yaml_string() + """---\n"""
return full_content
@classmethod
def lowerCamelCase__ ( cls : Optional[int] , lowerCAmelCase : str ) -> "DatasetMetadata":
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =yaml.load(lowerCAmelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_: List[Any] ={
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**lowerCAmelCase )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=lowerCAmelCase , allow_unicode=lowerCAmelCase , encoding="""utf-8""" , ).decode("""utf-8""" )
_UpperCAmelCase = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_UpperCAmelCase = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
_UpperCAmelCase = ap.parse_args()
_UpperCAmelCase = Path(args.readme_filepath)
_UpperCAmelCase = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 36
| 1
|
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class a :
def __init__( self : Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =psutil.Process()
SCREAMING_SNAKE_CASE_: Tuple =False
def lowerCamelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =-1
while True:
SCREAMING_SNAKE_CASE_: Optional[int] =max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =True
SCREAMING_SNAKE_CASE_: int =threading.Thread(target=self.peak_monitor )
SCREAMING_SNAKE_CASE_: Optional[Any] =True
self.thread.start()
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =False
self.thread.join()
return self.cpu_memory_peak
_UpperCAmelCase = PeakCPUMemory()
def __magic_name__ ( ):
# Time
SCREAMING_SNAKE_CASE_: Optional[int] ={"""time""": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
SCREAMING_SNAKE_CASE_: List[Any] =psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
SCREAMING_SNAKE_CASE_: List[Any] =torch.cuda.memory_allocated(lowercase )
torch.cuda.reset_peak_memory_stats()
return measures
def __magic_name__ ( lowercase ):
# Time
SCREAMING_SNAKE_CASE_: Union[str, Any] ={"""time""": time.time() - start_measures["""time"""]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
SCREAMING_SNAKE_CASE_: Any =(psutil.Process().memory_info().rss - start_measures["""cpu"""]) / 2**20
SCREAMING_SNAKE_CASE_: Dict =(cpu_peak_tracker.stop() - start_measures["""cpu"""]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
SCREAMING_SNAKE_CASE_: Dict =(torch.cuda.memory_allocated(lowercase ) - start_measures[str(lowercase )]) / 2**20
SCREAMING_SNAKE_CASE_: Optional[int] =(torch.cuda.max_memory_allocated(lowercase ) - start_measures[str(lowercase )]) / 2**20
return measures
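# Usage sketch (the helper names below are placeholders -- every function in
# this dump is emitted as __magic_name__):
#     start = start_measure()            # snapshot time, CPU RAM and per-GPU memory
#     run_workload()                     # whatever is being benchmarked
#     measures = end_measure(start)      # elapsed seconds plus CPU/GPU deltas in MiB
#     log_measures(measures, "run 1")    # pretty-print allocations and peaks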
def __magic_name__ ( lowercase , lowercase ):
print(f'''{description}:''' )
print(f'''- Time: {measures["time"]:.2f}s''' )
for i in range(torch.cuda.device_count() ):
print(f'''- GPU {i} allocated: {measures[str(lowercase )]:.2f}MiB''' )
SCREAMING_SNAKE_CASE_: int =measures[f'''{i}-peak''']
print(f'''- GPU {i} peak: {peak:.2f}MiB''' )
print(f'''- CPU RAM allocated: {measures["cpu"]:.2f}MiB''' )
print(f'''- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB''' )
| 36
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __magic_name__ ( lowercase ):
return (data["data"], data["target"])
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =XGBClassifier()
classifier.fit(lowercase , lowercase )
return classifier
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Optional[Any] =load_iris()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =data_handling(lowercase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =train_test_split(
lowercase , lowercase , test_size=0.25 )
SCREAMING_SNAKE_CASE_: Tuple =iris["""target_names"""]
# Create an XGBoost Classifier from the training data
SCREAMING_SNAKE_CASE_: Optional[int] =xgboost(lowercase , lowercase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
lowercase , lowercase , lowercase , display_labels=lowercase , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 36
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
    description=(
        """Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned"""
        """ Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
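    # Note (sketch): the six teacher layers [0, 2, 4, 7, 9, 11] selected above are
    # copied, in order, into the first six layers of the distilled student, along
    # with the embeddings, the MLM head, and (when --vocab_transform is passed)
    # the vocab transform weights.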
| 36
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
SCREAMING_SNAKE_CASE_: List[str] =[]
SCREAMING_SNAKE_CASE_: Any =[]
for rt in rc.restypes:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
SCREAMING_SNAKE_CASE_: Any ={name: i for i, name in enumerate(lowercase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor(
lowercase , dtype=torch.floataa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Any =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: Tuple =residx_atomaa_mask
SCREAMING_SNAKE_CASE_: Dict =residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
SCREAMING_SNAKE_CASE_: Dict =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Optional[int] =residx_atomaa_to_atomaa.long()
# create the corresponding mask
SCREAMING_SNAKE_CASE_: Optional[int] =torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
SCREAMING_SNAKE_CASE_: int =rc.restype_atoa[restype_letter]
SCREAMING_SNAKE_CASE_: Any =rc.residue_atoms[restype_name]
for atom_name in atom_names:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.atom_order[atom_name]
SCREAMING_SNAKE_CASE_: Dict =1
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: List[Any] =residx_atomaa_mask
return protein
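# Sketch of what the function above builds: per-residue-type lookup tables that
# translate between the compact 14-atom and the full 37-atom representations,
# plus 0/1 existence masks for each slot; indexing those tables with the
# per-residue aatype then yields the gather indices and masks stored back into
# the protein dict.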
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =tree_map(lambda lowercase : torch.tensor(lowercase , device=batch["""aatype"""].device ) , lowercase , np.ndarray )
SCREAMING_SNAKE_CASE_: int =tensor_tree_map(lambda lowercase : np.array(lowercase ) , make_atomaa_masks(lowercase ) )
return out
| 36
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class a ( UpperCAmelCase__ ):
UpperCamelCase : List[Any] = ['pixel_values']
def __init__( self : int , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = True , lowerCAmelCase : Union[int, float] = 1 / 255 , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : bool = True , **lowerCAmelCase : List[Any] , ) -> None:
'''simple docstring'''
super().__init__(**lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =size if size is not None else {"""shortest_edge""": 224}
SCREAMING_SNAKE_CASE_: Tuple =get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
SCREAMING_SNAKE_CASE_: List[Any] =get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase , param_name="""crop_size""" )
SCREAMING_SNAKE_CASE_: int =do_resize
SCREAMING_SNAKE_CASE_: int =size
SCREAMING_SNAKE_CASE_: Any =resample
SCREAMING_SNAKE_CASE_: Optional[Any] =do_center_crop
SCREAMING_SNAKE_CASE_: Optional[Any] =crop_size
SCREAMING_SNAKE_CASE_: Dict =do_rescale
SCREAMING_SNAKE_CASE_: int =rescale_factor
SCREAMING_SNAKE_CASE_: Union[str, Any] =do_normalize
SCREAMING_SNAKE_CASE_: List[Any] =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE_: str =image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE_: Optional[Any] =do_convert_rgb
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : List[Any] , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE_: List[str] =get_resize_output_image_size(lowerCAmelCase , size=size["""shortest_edge"""] , default_to_square=lowerCAmelCase )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : int , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[int, float] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Optional[int] , ) -> Optional[Any]:
'''simple docstring'''
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : str , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : ImageInput , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = None , lowerCAmelCase : bool = None , lowerCAmelCase : int = None , lowerCAmelCase : bool = None , lowerCAmelCase : float = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase : Optional[int] , ) -> PIL.Image.Image:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_: Tuple =size if size is not None else self.size
SCREAMING_SNAKE_CASE_: Optional[int] =get_size_dict(lowerCAmelCase , param_name="""size""" , default_to_square=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_: Dict =do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_: Any =crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_: List[Any] =get_size_dict(lowerCAmelCase , param_name="""crop_size""" , default_to_square=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_: str =rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_: Union[str, Any] =do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_: Tuple =image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_: Optional[Any] =image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_: Dict =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE_: Optional[int] =make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE_: List[Any] =[convert_to_rgb(lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_: Union[str, Any] =[to_numpy_array(lowerCAmelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_: List[Any] =[self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_: Union[str, Any] =[self.center_crop(image=lowerCAmelCase , size=lowerCAmelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_: Tuple =[self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_: Optional[Any] =[self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_: Tuple =[to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_: Tuple ={"""pixel_values""": images}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
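# Pipeline sketch: the last method above (the preprocess entry point) optionally
# converts images to RGB, resizes to the shortest edge, center-crops, rescales
# by 1/255 and normalizes with the CLIP mean/std, then packs the results into a
# BatchFeature under "pixel_values".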
| 36
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =[]
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(lowercase , lowercase ):
inputs.append(create_inputs(lowercase ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =[]
for output in outputs:
if isinstance(lowercase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(lowercase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(lowercase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
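# Example (sketch): passing a string, a PIL image and a 1-D torch tensor to the
# classifier above yields ["text", "image", "audio"], mirroring what
# create_inputs() produces on the input side.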
@is_tool_test
class a :
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] =self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
SCREAMING_SNAKE_CASE_: Any =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: List[Any] =self.tool(*lowerCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
SCREAMING_SNAKE_CASE_: str =[outputs]
self.assertListEqual(output_types(lowerCAmelCase ) , self.tool.outputs )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Tuple =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase , self.tool.outputs ):
SCREAMING_SNAKE_CASE_: int =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for _input, input_type in zip(lowerCAmelCase , self.tool.inputs ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
SCREAMING_SNAKE_CASE_: Dict =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
| 36
| 1
|
"""simple docstring"""
def __magic_name__ ( lowercase ):
if len(lowercase ) < 2:
return collection
def circle_sort_util(lowercase , lowercase , lowercase ) -> bool:
SCREAMING_SNAKE_CASE_: Optional[int] =False
if low == high:
return swapped
SCREAMING_SNAKE_CASE_: Union[str, Any] =low
SCREAMING_SNAKE_CASE_: Optional[int] =high
while left < right:
if collection[left] > collection[right]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =(
collection[right],
collection[left],
)
SCREAMING_SNAKE_CASE_: List[Any] =True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =(
collection[right + 1],
collection[left],
)
SCREAMING_SNAKE_CASE_: Tuple =True
SCREAMING_SNAKE_CASE_: Optional[int] =low + int((high - low) / 2 )
SCREAMING_SNAKE_CASE_: List[str] =circle_sort_util(lowercase , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Any =circle_sort_util(lowercase , mid + 1 , lowercase )
return swapped or left_swap or right_swap
SCREAMING_SNAKE_CASE_: Union[str, Any] =True
while is_not_sorted is True:
SCREAMING_SNAKE_CASE_: Dict =circle_sort_util(lowercase , 0 , len(lowercase ) - 1 )
return collection
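# Worked example (sketch): circle sort swaps elements compared from the two ends
# of each (sub)range, recurses on both halves, and repeats whole passes until no
# swap happens; e.g. [5, 3, 1, 4, 2] ends up as [1, 2, 3, 4, 5].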
if __name__ == "__main__":
_UpperCAmelCase = input("""Enter numbers separated by a comma:\n""").strip()
_UpperCAmelCase = [int(item) for item in user_input.split(""",""")]
print(circle_sort(unsorted))
| 36
|
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =sorted(numsa + numsa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =divmod(len(lowercase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
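# Worked examples (sketch): merging [1, 3] with [2] gives the odd-length list
# [1, 2, 3], so the median is the middle element 2; merging [1, 2] with [3, 4]
# gives an even-length list, so the median is (2 + 3) / 2 = 2.5.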
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of first array: """).split()]
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of second array: """).split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 36
| 1
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCAmelCase = 1_6
_UpperCAmelCase = 3_2
def __magic_name__ ( lowercase , lowercase = 16 , lowercase = "bert-base-cased" ):
SCREAMING_SNAKE_CASE_: Dict =AutoTokenizer.from_pretrained(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: List[str] =tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_: List[str] =datasets.map(
lowercase , batched=lowercase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE_: Optional[Any] =tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Any =DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
SCREAMING_SNAKE_CASE_: int =DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
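# Sketch of the loader above: MRPC sentence pairs are tokenized, the "label"
# column is renamed to "labels", and batches are padded dynamically ("longest")
# except on TPU, where a fixed max_length of 128 avoids XLA recompiling for
# every new shape.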
def __magic_name__ ( lowercase , lowercase ):
# Initialize accelerator
SCREAMING_SNAKE_CASE_: List[Any] =Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_: Any =config["""lr"""]
SCREAMING_SNAKE_CASE_: Dict =int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE_: List[Any] =int(config["""seed"""] )
SCREAMING_SNAKE_CASE_: List[str] =int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE_: List[Any] =args.model_name_or_path
set_seed(lowercase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =get_dataloaders(lowercase , lowercase , lowercase )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
SCREAMING_SNAKE_CASE_: Tuple =AutoModelForSequenceClassification.from_pretrained(lowercase , return_dict=lowercase )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_: Tuple =(
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
SCREAMING_SNAKE_CASE_: List[str] =optimizer_cls(params=model.parameters() , lr=lowercase )
if accelerator.state.deepspeed_plugin is not None:
SCREAMING_SNAKE_CASE_: Any =accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
SCREAMING_SNAKE_CASE_: Any =1
SCREAMING_SNAKE_CASE_: str =(len(lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
SCREAMING_SNAKE_CASE_: List[Any] =get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=0 , num_training_steps=lowercase , )
else:
SCREAMING_SNAKE_CASE_: Dict =DummyScheduler(lowercase , total_num_steps=lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# We need to keep track of how many total steps we have iterated over
SCREAMING_SNAKE_CASE_: str =0
    # We also need to keep track of the starting epoch so files are named properly
SCREAMING_SNAKE_CASE_: Tuple =0
# Now we train the model
SCREAMING_SNAKE_CASE_: str =evaluate.load("""glue""" , """mrpc""" )
SCREAMING_SNAKE_CASE_: int =0
SCREAMING_SNAKE_CASE_: Tuple ={}
for epoch in range(lowercase , lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
SCREAMING_SNAKE_CASE_: List[str] =model(**lowercase )
SCREAMING_SNAKE_CASE_: int =outputs.loss
SCREAMING_SNAKE_CASE_: List[Any] =loss / gradient_accumulation_steps
accelerator.backward(lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
SCREAMING_SNAKE_CASE_: List[Any] =0
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] =model(**lowercase )
SCREAMING_SNAKE_CASE_: Dict =outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once than multiple times
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase ) - 1:
SCREAMING_SNAKE_CASE_: int =predictions[: len(eval_dataloader.dataset ) - samples_seen]
SCREAMING_SNAKE_CASE_: Union[str, Any] =references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase , references=lowercase , )
SCREAMING_SNAKE_CASE_: List[str] =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , lowercase )
SCREAMING_SNAKE_CASE_: str =eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
SCREAMING_SNAKE_CASE_: Dict =eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
json.dump(lowercase , lowercase )
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: int =argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase , )
parser.add_argument(
"""--output_dir""" , type=lowercase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=lowercase , default=lowercase , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase , default=3 , help="""Number of train epochs.""" , )
SCREAMING_SNAKE_CASE_: Optional[int] =parser.parse_args()
SCREAMING_SNAKE_CASE_: Any ={"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
| 36
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str]=13 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=224 , lowerCAmelCase : List[str]=1000 , lowerCAmelCase : Optional[Any]=[3, 3, 6, 4] , lowerCAmelCase : int=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =parent
SCREAMING_SNAKE_CASE_: Any =batch_size
SCREAMING_SNAKE_CASE_: Tuple =num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] =is_training
SCREAMING_SNAKE_CASE_: Tuple =use_labels
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =num_labels
SCREAMING_SNAKE_CASE_: int =image_size
SCREAMING_SNAKE_CASE_: Optional[Any] =layer_depths
SCREAMING_SNAKE_CASE_: List[Any] =embed_dims
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_: Tuple =self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1E-5 , )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Any =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.num_labels
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE_: int =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): str =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_: Tuple ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[int] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCamelCase : Tuple = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Any = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Dict = False
UpperCamelCase : List[str] = False
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Any =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Tuple =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[Any] =SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_: Optional[Any] =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[str] =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Dict =outputs.hidden_states
SCREAMING_SNAKE_CASE_: List[Any] =8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
            # check that output_hidden_states also works when enabled via the config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Any =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
def _config_zero_init(lowerCAmelCase : str ):
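            # shrink every init-range / std / scale field to ~0 so freshly initialized parameters come out as effectively 0 or 1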
SCREAMING_SNAKE_CASE_: Dict =copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1E-10 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =_config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: List[Any] =_config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: int =prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 36
| 1
|
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Dict = BertTokenizer
UpperCamelCase : Optional[int] = BertTokenizerFast
UpperCamelCase : Optional[Any] = True
UpperCamelCase : int = True
UpperCamelCase : Any = filter_non_english
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().setUp()
SCREAMING_SNAKE_CASE_: List[str] =[
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
SCREAMING_SNAKE_CASE_: Any =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE_: Optional[Any] ="""unwanted, running"""
return input_text, output_text
def lowerCamelCase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE_: Dict =tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCAmelCase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_: Optional[Any] =self.get_tokenizer()
SCREAMING_SNAKE_CASE_: Any =self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: str ="""UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE_: List[str] =tokenizer.tokenize(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer.encode(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# With lower casing
SCREAMING_SNAKE_CASE_: Optional[int] =self.get_tokenizer(do_lower_case=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =self.get_rust_tokenizer(do_lower_case=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str ="""UNwant\u00E9d,running"""
SCREAMING_SNAKE_CASE_: Any =tokenizer.tokenize(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer.encode(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def lowerCamelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =BasicTokenizer(do_lower_case=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCamelCase__ ( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def lowerCamelCase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =BasicTokenizer(do_lower_case=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =BasicTokenizer(do_lower_case=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =BasicTokenizer(do_lower_case=lowerCAmelCase , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def lowerCamelCase__ ( self : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =BasicTokenizer()
SCREAMING_SNAKE_CASE_: Tuple ="""a\n'll !!to?'d of, can't."""
SCREAMING_SNAKE_CASE_: List[Any] =["""a""", """'""", """ll""", """!""", """!""", """to""", """?""", """'""", """d""", """of""", """,""", """can""", """'""", """t""", """."""]
self.assertListEqual(tokenizer.tokenize(lowerCAmelCase ) , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
SCREAMING_SNAKE_CASE_: Union[str, Any] ={}
for i, token in enumerate(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =i
SCREAMING_SNAKE_CASE_: Union[str, Any] =WordpieceTokenizer(vocab=lowerCAmelCase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def lowerCamelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.get_tokenizer()
SCREAMING_SNAKE_CASE_: Optional[int] =self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self.tokenizer_class.from_pretrained("""bert-base-uncased""" )
SCREAMING_SNAKE_CASE_: int =tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE_: List[Any] =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
SCREAMING_SNAKE_CASE_: Optional[Any] =tokenizer_r.encode_plus(
lowerCAmelCase , return_attention_mask=lowerCAmelCase , return_token_type_ids=lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: Any =tokenizer_r.do_lower_case if hasattr(lowerCAmelCase , """do_lower_case""" ) else False
SCREAMING_SNAKE_CASE_: int =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def lowerCamelCase__ ( self : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =["""的""", """人""", """有"""]
SCREAMING_SNAKE_CASE_: List[str] ="""""".join(lowerCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE_: Tuple =True
SCREAMING_SNAKE_CASE_: str =self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer_p.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer_r.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =tokenizer_r.convert_ids_to_tokens(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =tokenizer_p.convert_ids_to_tokens(lowerCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =False
SCREAMING_SNAKE_CASE_: List[str] =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =tokenizer_r.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =tokenizer_p.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =tokenizer_r.convert_ids_to_tokens(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer_p.convert_ids_to_tokens(lowerCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
SCREAMING_SNAKE_CASE_: int =[
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCAmelCase )
]
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
| 36
|
"""simple docstring"""
from math import pi
def __magic_name__ ( lowercase , lowercase ):
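    # arc length of a circular sector: the full circumference 2*pi*r scaled by angle/360 (angle given in degrees)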
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
| 36
| 1
|
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
class a ( UpperCAmelCase__ ):
def __init__( self : int , lowerCAmelCase : Tuple=None , **lowerCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" , lowerCAmelCase , )
super().__init__(args=lowerCAmelCase , **lowerCAmelCase )
| 36
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Any =jax.device_count()
SCREAMING_SNAKE_CASE_: Dict =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Dict =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int ="""stabilityai/stable-diffusion-2"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Optional[int] =scheduler_params
SCREAMING_SNAKE_CASE_: Tuple ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.device_count()
SCREAMING_SNAKE_CASE_: Optional[Any] =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Any =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: str =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Any =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 36
| 1
|
"""simple docstring"""
from collections.abc import Callable
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: float =a
SCREAMING_SNAKE_CASE_: float =b
if function(lowercase ) == 0: # one of the a or b is a root for the function
return a
elif function(lowercase ) == 0:
return b
elif (
function(lowercase ) * function(lowercase ) > 0
    ):  # if neither endpoint is a root and f(a) and f(b) have the same sign,
        # then this algorithm cannot bracket a root in the given interval
raise ValueError("""could not find root in given interval.""" )
else:
SCREAMING_SNAKE_CASE_: float =start + (end - start) / 2.0
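        # repeatedly halve the bracketing interval, keeping the half where the function changes sign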
        while abs(start - mid ) > 10**-7: # until the bracketing interval is narrower than 10^-7
if function(lowercase ) == 0:
return mid
elif function(lowercase ) * function(lowercase ) < 0:
SCREAMING_SNAKE_CASE_: Dict =mid
else:
SCREAMING_SNAKE_CASE_: Tuple =mid
SCREAMING_SNAKE_CASE_: int =start + (end - start) / 2.0
return mid
def __magic_name__ ( lowercase ):
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
| 36
|
"""simple docstring"""
def __magic_name__ ( lowercase = 200_0000 ):
SCREAMING_SNAKE_CASE_: List[Any] =[0 for i in range(n + 1 )]
SCREAMING_SNAKE_CASE_: Union[str, Any] =1
SCREAMING_SNAKE_CASE_: Optional[Any] =1
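    # sieve of Eratosthenes: indices still marked 0 after the sieving loop below are prime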
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =1
SCREAMING_SNAKE_CASE_: Dict =0
for i in range(lowercase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_UpperCAmelCase = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
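    # copy teacher layers 0, 2, 4, 7, 9 and 11 into consecutive layers of the smaller student model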
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 36
| 1
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class a :
def __init__( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : List[str]=13 , lowerCAmelCase : Optional[Any]=7 , lowerCAmelCase : str=False , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Dict=False , lowerCAmelCase : Dict=19 , lowerCAmelCase : Dict=32 , lowerCAmelCase : Dict=5 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : Any=37 , lowerCAmelCase : List[Any]="gelu" , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=512 , lowerCAmelCase : List[Any]=16 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Dict=0.0_2 , lowerCAmelCase : int=3 , lowerCAmelCase : Optional[int]=4 , lowerCAmelCase : Optional[int]=None , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =parent
SCREAMING_SNAKE_CASE_: Tuple =batch_size
SCREAMING_SNAKE_CASE_: List[Any] =seq_length
SCREAMING_SNAKE_CASE_: int =is_training
SCREAMING_SNAKE_CASE_: Any =use_input_mask
SCREAMING_SNAKE_CASE_: List[str] =use_token_type_ids
SCREAMING_SNAKE_CASE_: List[Any] =use_labels
SCREAMING_SNAKE_CASE_: Tuple =vocab_size
SCREAMING_SNAKE_CASE_: Any =hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] =num_hidden_layers
SCREAMING_SNAKE_CASE_: Optional[int] =num_attention_heads
SCREAMING_SNAKE_CASE_: Optional[Any] =intermediate_size
SCREAMING_SNAKE_CASE_: List[Any] =hidden_act
SCREAMING_SNAKE_CASE_: Union[str, Any] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Optional[int] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =max_position_embeddings
SCREAMING_SNAKE_CASE_: Union[str, Any] =type_vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =type_sequence_label_size
SCREAMING_SNAKE_CASE_: Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE_: Any =num_labels
SCREAMING_SNAKE_CASE_: Dict =num_choices
SCREAMING_SNAKE_CASE_: Any =scope
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_: Optional[int] =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_: Optional[Any] =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_: Any =None
SCREAMING_SNAKE_CASE_: Dict =None
SCREAMING_SNAKE_CASE_: int =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_: int =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_: Dict =ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_: List[Any] =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=lowerCAmelCase , esmfold_config={"""trunk""": {"""num_blocks""": 2}, """fp16_esm""": False} , )
return config
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =EsmForProteinFolding(config=lowerCAmelCase ).float()
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(lowerCAmelCase , attention_mask=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =model(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(lowerCAmelCase )
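        # positions are reported per structure-module iteration: presumably (8 iterations, batch, seq_len, 14 atoms in atom14 format, xyz)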
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =config_and_inputs
SCREAMING_SNAKE_CASE_: int ={"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : List[str] = False
UpperCamelCase : List[str] = (EsmForProteinFolding,) if is_torch_available() else ()
UpperCamelCase : List[str] = ()
UpperCamelCase : List[Any] = {} if is_torch_available() else {}
UpperCamelCase : Tuple = False
def lowerCamelCase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =EsmFoldModelTester(self )
SCREAMING_SNAKE_CASE_: Tuple =ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
def lowerCamelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
@unittest.skip("""Does not support attention outputs""" )
def lowerCamelCase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCamelCase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support passing input embeds!""" )
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self : str ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support head pruning.""" )
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not output hidden states in the normal way.""" )
def lowerCamelCase__ ( self : Any ) -> str:
'''simple docstring'''
pass
@unittest.skip("""ESMfold does not output hidden states in the normal way.""" )
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold only has one output format.""" )
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("""This test doesn't work for ESMFold and doesn't test core functionality""" )
def lowerCamelCase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold does not support input chunking.""" )
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.""" )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support torchscript compilation.""" )
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""ESMFold doesn't support data parallel.""" )
def lowerCamelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch
class a ( UpperCAmelCase__ ):
@slow
def lowerCamelCase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =EsmForProteinFolding.from_pretrained("""facebook/esmfold_v1""" ).float()
model.eval()
SCREAMING_SNAKE_CASE_: str =torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
SCREAMING_SNAKE_CASE_: Optional[int] =model(lowerCAmelCase )["""positions"""]
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , lowerCAmelCase , atol=1E-4 ) )
| 36
|
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
return int((input_a, input_a).count(0 ) == 0 )
def __magic_name__ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 36
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
def __magic_name__ ( lowercase , lowercase=False ):
SCREAMING_SNAKE_CASE_: Tuple =[]
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE_: List[str] =[(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __magic_name__ ( lowercase , lowercase , lowercase=False ):
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE_: List[Any] =""""""
else:
SCREAMING_SNAKE_CASE_: List[Any] ="""vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE_: Any =state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE_: Tuple =state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE_: List[Any] =in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE_: Tuple =in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE_: Dict =in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE_: List[str] =in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE_: Any =in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE_: Optional[Any] =in_proj_bias[-config.hidden_size :]
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase , lowercase )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =dct.pop(lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =val
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Optional[Any] ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
SCREAMING_SNAKE_CASE_: List[str] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =ViTConfig()
SCREAMING_SNAKE_CASE_: Dict =False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
SCREAMING_SNAKE_CASE_: Union[str, Any] =True
SCREAMING_SNAKE_CASE_: List[str] =int(vit_name[-12:-10] )
SCREAMING_SNAKE_CASE_: int =int(vit_name[-9:-6] )
else:
SCREAMING_SNAKE_CASE_: int =1000
SCREAMING_SNAKE_CASE_: Optional[int] ="""huggingface/label-files"""
SCREAMING_SNAKE_CASE_: int ="""imagenet-1k-id2label.json"""
SCREAMING_SNAKE_CASE_: Optional[int] =json.load(open(hf_hub_download(lowercase , lowercase , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] ={int(lowercase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_: Optional[Any] =idalabel
SCREAMING_SNAKE_CASE_: Optional[int] ={v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_: List[Any] =int(vit_name[-6:-4] )
SCREAMING_SNAKE_CASE_: Optional[Any] =int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("""tiny""" ):
SCREAMING_SNAKE_CASE_: List[Any] =192
SCREAMING_SNAKE_CASE_: List[Any] =768
SCREAMING_SNAKE_CASE_: Dict =12
SCREAMING_SNAKE_CASE_: str =3
elif vit_name[9:].startswith("""small""" ):
SCREAMING_SNAKE_CASE_: Optional[int] =384
SCREAMING_SNAKE_CASE_: Union[str, Any] =1536
SCREAMING_SNAKE_CASE_: int =12
SCREAMING_SNAKE_CASE_: Optional[int] =6
else:
pass
else:
if vit_name[4:].startswith("""small""" ):
SCREAMING_SNAKE_CASE_: Any =768
SCREAMING_SNAKE_CASE_: Tuple =2304
SCREAMING_SNAKE_CASE_: Optional[int] =8
SCREAMING_SNAKE_CASE_: Dict =8
elif vit_name[4:].startswith("""base""" ):
pass
elif vit_name[4:].startswith("""large""" ):
SCREAMING_SNAKE_CASE_: Tuple =1024
SCREAMING_SNAKE_CASE_: int =4096
SCREAMING_SNAKE_CASE_: List[str] =24
SCREAMING_SNAKE_CASE_: Any =16
elif vit_name[4:].startswith("""huge""" ):
SCREAMING_SNAKE_CASE_: str =1280
SCREAMING_SNAKE_CASE_: List[Any] =5120
SCREAMING_SNAKE_CASE_: Any =32
SCREAMING_SNAKE_CASE_: Optional[Any] =16
# load original model from timm
SCREAMING_SNAKE_CASE_: Union[str, Any] =timm.create_model(lowercase , pretrained=lowercase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE_: Dict =timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase )
SCREAMING_SNAKE_CASE_: str =create_rename_keys(lowercase , lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
read_in_q_k_v(lowercase , lowercase , lowercase )
# load HuggingFace model
if vit_name[-5:] == "in21k":
SCREAMING_SNAKE_CASE_: Optional[int] =ViTModel(lowercase ).eval()
else:
SCREAMING_SNAKE_CASE_: str =ViTForImageClassification(lowercase ).eval()
model.load_state_dict(lowercase )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
SCREAMING_SNAKE_CASE_: Optional[int] =DeiTImageProcessor(size=config.image_size )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =ViTImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: Optional[int] =image_processor(images=prepare_img() , return_tensors="""pt""" )
SCREAMING_SNAKE_CASE_: Union[str, Any] =encoding["""pixel_values"""]
SCREAMING_SNAKE_CASE_: Dict =model(lowercase )
if base_model:
SCREAMING_SNAKE_CASE_: List[str] =timm_model.forward_features(lowercase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase , outputs.pooler_output , atol=1e-3 )
else:
SCREAMING_SNAKE_CASE_: List[str] =timm_model(lowercase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase , outputs.logits , atol=1e-3 )
Path(lowercase ).mkdir(exist_ok=lowercase )
print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 36
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def __magic_name__ ( lowercase , lowercase , lowercase ):
hf_model.apply_weight_norm()
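    # apply weight norm first so the weight_g / weight_v parameters exist before copying them from the original checkpoint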
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""input_conv.weight_g"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[str] =checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: Union[str, Any] =checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[Any] =checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""output_conv.1.weight_g"""]
SCREAMING_SNAKE_CASE_: List[str] =checkpoint["""output_conv.1.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE_: List[Any] =SpeechTaHifiGanConfig.from_pretrained(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] =SpeechTaHifiGan(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch.load(lowercase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =np.load(lowercase )
SCREAMING_SNAKE_CASE_: Any =stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_: str =stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
model.save_pretrained(lowercase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 36
| 1
|
"""simple docstring"""
def __magic_name__ ( lowercase ):
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
SCREAMING_SNAKE_CASE_: Optional[int] =1
SCREAMING_SNAKE_CASE_: Tuple =1
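    # iterate R(k) mod divisor via R(k+1) = 10*R(k) + 1, counting digits until the remainder hits 0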
while repunit:
SCREAMING_SNAKE_CASE_: List[str] =(10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def __magic_name__ ( lowercase = 100_0000 ):
SCREAMING_SNAKE_CASE_: List[str] =limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(lowercase ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __magic_name__ ( lowercase ):
if "cls_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def __magic_name__ ( lowercase , lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Optional[int] =orig_state_dict.pop(lowercase )
if "qkv" in key:
SCREAMING_SNAKE_CASE_: Dict =key.split(""".""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE_: int =config.decoder_hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] ="""decoder.decoder_layers."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Dict =val[:dim, :]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: str =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: List[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: List[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Any =config.hidden_size
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""vit.encoder.layer."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim, :]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: Dict =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Any =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Tuple =val
return orig_state_dict
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: List[Any] =1024
SCREAMING_SNAKE_CASE_: Dict =4096
SCREAMING_SNAKE_CASE_: Tuple =24
SCREAMING_SNAKE_CASE_: int =16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Union[str, Any] =14
SCREAMING_SNAKE_CASE_: Any =1280
SCREAMING_SNAKE_CASE_: Dict =5120
SCREAMING_SNAKE_CASE_: Optional[int] =32
SCREAMING_SNAKE_CASE_: Optional[Any] =16
SCREAMING_SNAKE_CASE_: Tuple =ViTMAEForPreTraining(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""model"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: str =convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple ="""https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
SCREAMING_SNAKE_CASE_: List[Any] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
SCREAMING_SNAKE_CASE_: int =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: int =image_processor(images=lowercase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
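    # ViTMAE masks patches at random, so the seed is fixed to keep the logits reproducible for the check below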
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Dict =torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
SCREAMING_SNAKE_CASE_: Any =torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
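# Example invocation (script file name and output directory are placeholders; the
# checkpoint URL is the default defined above):
#
#     python convert_vit_mae_checkpoint.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#         --pytorch_dump_folder_path ./vit-mae-base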
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Optional[int] = 'codegen'
UpperCamelCase : List[str] = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : List[Any] , lowerCAmelCase : Union[str, Any]=5_0400 , lowerCAmelCase : Tuple=2048 , lowerCAmelCase : Dict=2048 , lowerCAmelCase : List[Any]=4096 , lowerCAmelCase : str=28 , lowerCAmelCase : Union[str, Any]=16 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[str]="gelu_new" , lowerCAmelCase : int=0.0 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : int=0.0 , lowerCAmelCase : int=1E-5 , lowerCAmelCase : Dict=0.0_2 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[str]=5_0256 , lowerCAmelCase : Dict=5_0256 , lowerCAmelCase : int=False , **lowerCAmelCase : str , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =vocab_size
SCREAMING_SNAKE_CASE_: Any =n_ctx
SCREAMING_SNAKE_CASE_: str =n_positions
SCREAMING_SNAKE_CASE_: Any =n_embd
SCREAMING_SNAKE_CASE_: List[Any] =n_layer
SCREAMING_SNAKE_CASE_: Tuple =n_head
SCREAMING_SNAKE_CASE_: List[Any] =n_inner
SCREAMING_SNAKE_CASE_: List[Any] =rotary_dim
SCREAMING_SNAKE_CASE_: Tuple =activation_function
SCREAMING_SNAKE_CASE_: Any =resid_pdrop
SCREAMING_SNAKE_CASE_: List[str] =embd_pdrop
SCREAMING_SNAKE_CASE_: List[Any] =attn_pdrop
SCREAMING_SNAKE_CASE_: Optional[Any] =layer_norm_epsilon
SCREAMING_SNAKE_CASE_: Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE_: Tuple =use_cache
SCREAMING_SNAKE_CASE_: Any =bos_token_id
SCREAMING_SNAKE_CASE_: str =eos_token_id
super().__init__(
bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , **lowerCAmelCase )
class a ( UpperCAmelCase__ ):
def __init__( self : Union[str, Any] , lowerCAmelCase : PretrainedConfig , lowerCAmelCase : str = "default" , lowerCAmelCase : List[PatchingSpec] = None , lowerCAmelCase : bool = False , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase , task=lowerCAmelCase , patching_specs=lowerCAmelCase , use_past=lowerCAmelCase )
if not getattr(self._config , """pad_token_id""" , lowerCAmelCase ):
# TODO: how to do that better?
SCREAMING_SNAKE_CASE_: Dict =0
@property
def lowerCamelCase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction="""inputs""" )
SCREAMING_SNAKE_CASE_: Any ={0: """batch""", 1: """past_sequence + sequence"""}
else:
SCREAMING_SNAKE_CASE_: Optional[Any] ={0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCamelCase__ ( self : int ) -> int:
'''simple docstring'''
return self._config.n_layer
@property
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return self._config.n_head
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : PreTrainedTokenizer , lowerCAmelCase : int = -1 , lowerCAmelCase : int = -1 , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =super(lowerCAmelCase , self ).generate_dummy_inputs(
lowerCAmelCase , batch_size=lowerCAmelCase , seq_length=lowerCAmelCase , is_pair=lowerCAmelCase , framework=lowerCAmelCase )
        # We need to order the inputs in the way they appear in the forward()
SCREAMING_SNAKE_CASE_: int =OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE_: Dict =seqlen + 2
SCREAMING_SNAKE_CASE_: List[str] =(
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
SCREAMING_SNAKE_CASE_: Any =[
(torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE_: Dict =common_inputs["""attention_mask"""]
if self.use_past:
SCREAMING_SNAKE_CASE_: int =ordered_inputs["""attention_mask"""].dtype
SCREAMING_SNAKE_CASE_: Any =torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(lowerCAmelCase , lowerCAmelCase , dtype=lowerCAmelCase )] , dim=1 )
return ordered_inputs
@property
def lowerCamelCase__ ( self : Dict ) -> int:
'''simple docstring'''
return 13
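# For reference, each dummy past key/value tensor built above has the shape
# (batch, num_attention_heads, past_sequence_length, head_dim), where
# head_dim = hidden_size // num_attention_heads. A minimal sketch with assumed sizes
# (illustrative values only):
#
#     import torch
#     batch, n_head, past_len, head_dim = 2, 16, 10, 256
#     past_key_value = (torch.zeros(batch, n_head, past_len, head_dim),
#                       torch.zeros(batch, n_head, past_len, head_dim))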
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =False
    while is_sorted is False: # keep looping until a full pass makes no swaps
SCREAMING_SNAKE_CASE_: Tuple =True
for i in range(0 , len(lowercase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_: Tuple =False
for i in range(1 , len(lowercase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_: str =False
return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
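# Odd-even (brick) sort alternates compare-and-swap passes over even-indexed and
# odd-indexed pairs until a full pass performs no swap. For example, the input
# [5, 3, 1, 4, 2] is rearranged to [1, 2, 3, 4, 5] after a few such passes.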
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_UpperCAmelCase = """\
Text data.
Second line of data."""
_UpperCAmelCase = """file"""
@pytest.fixture(scope="""session""" )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
SCREAMING_SNAKE_CASE_: Optional[int] =bytes(lowercase , """utf-8""" )
with zstd.open(lowercase , """wb""" ) as f:
f.write(lowercase )
return path
@pytest.fixture
def __magic_name__ ( lowercase ):
with open(os.path.join(tmpfs.local_root_dir , lowercase ) , """w""" ) as f:
f.write(lowercase )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict ={"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
SCREAMING_SNAKE_CASE_: str =input_paths[compression_format]
SCREAMING_SNAKE_CASE_: Union[str, Any] =tmp_path / """cache"""
SCREAMING_SNAKE_CASE_: Dict =DownloadConfig(cache_dir=lowercase , extract_compressed_file=lowercase )
SCREAMING_SNAKE_CASE_: List[str] =cached_path(lowercase , download_config=lowercase )
with open(lowercase ) as f:
SCREAMING_SNAKE_CASE_: Union[str, Any] =f.read()
with open(lowercase ) as f:
SCREAMING_SNAKE_CASE_: int =f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""custom_cache"""
SCREAMING_SNAKE_CASE_: Optional[Any] ="""custom_extracted_dir"""
SCREAMING_SNAKE_CASE_: Tuple =tmp_path / """custom_extracted_path"""
if default_extracted:
SCREAMING_SNAKE_CASE_: Optional[Any] =("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , lowercase )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(lowercase ) )
SCREAMING_SNAKE_CASE_: str =custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
SCREAMING_SNAKE_CASE_: str =xz_file
SCREAMING_SNAKE_CASE_: Dict =(
DownloadConfig(extract_compressed_file=lowercase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowercase )
)
SCREAMING_SNAKE_CASE_: int =cached_path(lowercase , download_config=lowercase )
assert Path(lowercase ).parent.parts[-2:] == expected
def __magic_name__ ( lowercase ):
# absolute path
SCREAMING_SNAKE_CASE_: Tuple =str(Path(lowercase ).resolve() )
assert cached_path(lowercase ) == text_file
# relative path
SCREAMING_SNAKE_CASE_: Optional[int] =str(Path(lowercase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowercase ) == text_file
def __magic_name__ ( lowercase ):
# absolute path
SCREAMING_SNAKE_CASE_: str =str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(lowercase ):
cached_path(lowercase )
# relative path
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""./__missing_file__.txt"""
with pytest.raises(lowercase ):
cached_path(lowercase )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =get_from_cache(f'''tmp://{tmpfs_file}''' )
with open(lowercase ) as f:
SCREAMING_SNAKE_CASE_: int =f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowercase )
def __magic_name__ ( ):
with pytest.raises(lowercase ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowercase )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(lowercase ):
http_get("""https://huggingface.co""" , temp_file=lowercase )
with pytest.raises(lowercase ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowercase )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(lowercase ):
ftp_get("""ftp://huggingface.co""" , temp_file=lowercase )
with pytest.raises(lowercase ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , lowercase )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =tmp_path_factory.mktemp("""data""" ) / """file.html"""
with pytest.raises(lowercase ):
fsspec_get("""s3://huggingface.co""" , temp_file=lowercase )
with pytest.raises(lowercase ):
fsspec_head("""s3://huggingface.co""" )
"""simple docstring"""
def __magic_name__ ( lowercase ):
return str(lowercase ) == str(lowercase )[::-1]
def __magic_name__ ( lowercase ):
return int(lowercase ) + int(str(lowercase )[::-1] )
def __magic_name__ ( lowercase = 1_0000 ):
SCREAMING_SNAKE_CASE_: List[str] =[]
for num in range(1 , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =0
SCREAMING_SNAKE_CASE_: int =num
while iterations < 50:
SCREAMING_SNAKE_CASE_: Optional[Any] =sum_reverse(lowercase )
iterations += 1
if is_palindrome(lowercase ):
break
else:
lychrel_nums.append(lowercase )
return len(lowercase )
if __name__ == "__main__":
print(f"""{solution() = }""")
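# Example of the reverse-and-add process checked above: starting from 47,
# 47 + 74 = 121 is a palindrome after one iteration, so 47 is not a Lychrel number;
# solution() counts how many numbers below the limit never reach a palindrome
# within 50 iterations.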
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_UpperCAmelCase = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""DeiTFeatureExtractor"""]
_UpperCAmelCase = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""DPTFeatureExtractor"""]
_UpperCAmelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def __magic_name__ ( ):
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(lowercase ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def __magic_name__ ( ):
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def __magic_name__ ( ):
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(lowercase ):
http_head("""https://huggingface.co""" )
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class a :
def __init__( self : str ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: list[Any] =[]
SCREAMING_SNAKE_CASE_: int =0
SCREAMING_SNAKE_CASE_: int =0
def lowerCamelCase__ ( self : Optional[Any] ) -> bool:
'''simple docstring'''
return self.head == self.tail
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
self.data.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =self.tail + 1
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.data[self.head]
SCREAMING_SNAKE_CASE_: Optional[int] =self.head + 1
return ret
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.tail - self.head
def lowerCamelCase__ ( self : str ) -> None:
'''simple docstring'''
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =data
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: int =1
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return self.data
def lowerCamelCase__ ( self : List[Any] ) -> MyNode | None:
'''simple docstring'''
return self.left
def lowerCamelCase__ ( self : Dict ) -> MyNode | None:
'''simple docstring'''
return self.right
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
return self.height
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =data
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =node
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =node
def lowerCamelCase__ ( self : int , lowerCAmelCase : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =height
def __magic_name__ ( lowercase ):
if node is None:
return 0
return node.get_height()
def __magic_name__ ( lowercase , lowercase ):
if a > b:
return a
return b
def __magic_name__ ( lowercase ):
print("""left rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: int =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
print("""right rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowercase ) )
return right_rotation(lowercase )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowercase ) )
return left_rotation(lowercase )
def __magic_name__ ( lowercase , lowercase ):
if node is None:
return MyNode(lowercase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowercase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
SCREAMING_SNAKE_CASE_: Union[str, Any] =node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
SCREAMING_SNAKE_CASE_: Any =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: List[Any] =lr_rotation(lowercase )
else:
node.set_right(insert_node(node.get_right() , lowercase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
SCREAMING_SNAKE_CASE_: Tuple =node.get_right()
assert right_child is not None
if data < right_child.get_data():
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =left_rotation(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
return node
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: Dict =root.get_right()
if right_child is None:
break
SCREAMING_SNAKE_CASE_: str =right_child
return root.get_data()
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: str =root.get_left()
if left_child is None:
break
SCREAMING_SNAKE_CASE_: Dict =left_child
return root.get_data()
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str =root.get_left()
SCREAMING_SNAKE_CASE_: List[Any] =root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_left_most(lowercase )
root.set_data(lowercase )
root.set_right(del_node(lowercase , lowercase ) )
elif left_child is not None:
SCREAMING_SNAKE_CASE_: Optional[int] =left_child
elif right_child is not None:
SCREAMING_SNAKE_CASE_: Any =right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(lowercase , lowercase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowercase , lowercase ) )
if get_height(lowercase ) - get_height(lowercase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
SCREAMING_SNAKE_CASE_: Tuple =left_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
elif get_height(lowercase ) - get_height(lowercase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
SCREAMING_SNAKE_CASE_: Optional[Any] =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: str =lr_rotation(lowercase )
SCREAMING_SNAKE_CASE_: str =my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowercase )
return root
class a :
def __init__( self : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: MyNode | None =None
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
return get_height(self.root )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""insert:""" + str(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Tuple =insert_node(self.root , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""delete:""" + str(lowerCAmelCase ) )
if self.root is None:
print("""Tree is empty!""" )
return
SCREAMING_SNAKE_CASE_: Union[str, Any] =del_node(self.root , lowerCAmelCase )
    def __str__( self : List[str] , ) -> str: # a level-order traversal gives a more intuitive look at the tree
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =""""""
SCREAMING_SNAKE_CASE_: str =MyQueue()
q.push(self.root )
SCREAMING_SNAKE_CASE_: List[str] =self.get_height()
if layer == 0:
return output
SCREAMING_SNAKE_CASE_: int =0
while not q.is_empty():
SCREAMING_SNAKE_CASE_: int =q.pop()
SCREAMING_SNAKE_CASE_: List[Any] =""" """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(lowerCAmelCase )
q.push(lowerCAmelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
SCREAMING_SNAKE_CASE_: List[Any] =cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , lowerCAmelCase ) - 1:
SCREAMING_SNAKE_CASE_: int =layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __magic_name__ ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
_UpperCAmelCase = AVLtree()
_UpperCAmelCase = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
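# AVL invariant maintained above: after every insert/delete, the heights of the left
# and right subtrees of any node differ by at most 1; the four rotation helpers
# (left, right, lr, rl) restore this. For example, inserting 1, 2, 3 in that order
# triggers a left rotation so that 2 becomes the root with children 1 and 3.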
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase ):
for attribute in key.split(""".""" ):
SCREAMING_SNAKE_CASE_: int =getattr(lowercase , lowercase )
if weight_type is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =getattr(lowercase , lowercase ).shape
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE_: Any =value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE_: List[Any] =value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE_: Any =value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE_: Optional[Any] =value
else:
SCREAMING_SNAKE_CASE_: List[str] =value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =[]
SCREAMING_SNAKE_CASE_: Any =fairseq_model.state_dict()
SCREAMING_SNAKE_CASE_: List[str] =hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == """group""" , )
SCREAMING_SNAKE_CASE_: List[str] =True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE_: str ="""sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
SCREAMING_SNAKE_CASE_: Union[str, Any] =True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.split(lowercase )[0].split(""".""" )[-2]
SCREAMING_SNAKE_CASE_: int =mapped_key.replace("""*""" , lowercase )
if "weight_g" in name:
SCREAMING_SNAKE_CASE_: Optional[int] ="""weight_g"""
elif "weight_v" in name:
SCREAMING_SNAKE_CASE_: List[Any] ="""weight_v"""
elif "weight" in name:
SCREAMING_SNAKE_CASE_: List[str] ="""weight"""
elif "bias" in name:
SCREAMING_SNAKE_CASE_: Optional[int] ="""bias"""
else:
SCREAMING_SNAKE_CASE_: List[Any] =None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =full_name.split("""conv_layers.""" )[-1]
SCREAMING_SNAKE_CASE_: Optional[Any] =name.split(""".""" )
SCREAMING_SNAKE_CASE_: int =int(items[0] )
SCREAMING_SNAKE_CASE_: Dict =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE_: List[Any] =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE_: Any =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
SCREAMING_SNAKE_CASE_: Any =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE_: Optional[int] =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase )
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =SEWConfig()
if is_finetuned:
SCREAMING_SNAKE_CASE_: Union[str, Any] =model.wav_encoder.wav_model.cfg
else:
SCREAMING_SNAKE_CASE_: Tuple =model.cfg
SCREAMING_SNAKE_CASE_: List[str] =fs_config.conv_bias
SCREAMING_SNAKE_CASE_: List[Any] =eval(fs_config.conv_feature_layers )
SCREAMING_SNAKE_CASE_: Optional[Any] =[x[0] for x in conv_layers]
SCREAMING_SNAKE_CASE_: Dict =[x[1] for x in conv_layers]
SCREAMING_SNAKE_CASE_: List[str] =[x[2] for x in conv_layers]
SCREAMING_SNAKE_CASE_: str ="""gelu"""
SCREAMING_SNAKE_CASE_: Tuple ="""layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
SCREAMING_SNAKE_CASE_: int =0.0
SCREAMING_SNAKE_CASE_: Union[str, Any] =fs_config.activation_fn.name
SCREAMING_SNAKE_CASE_: str =fs_config.encoder_embed_dim
SCREAMING_SNAKE_CASE_: Union[str, Any] =0.02
SCREAMING_SNAKE_CASE_: str =fs_config.encoder_ffn_embed_dim
SCREAMING_SNAKE_CASE_: List[str] =1e-5
SCREAMING_SNAKE_CASE_: List[Any] =fs_config.encoder_layerdrop
SCREAMING_SNAKE_CASE_: Union[str, Any] =fs_config.encoder_attention_heads
SCREAMING_SNAKE_CASE_: Any =fs_config.conv_pos_groups
SCREAMING_SNAKE_CASE_: Optional[Any] =fs_config.conv_pos
SCREAMING_SNAKE_CASE_: List[str] =len(lowercase )
SCREAMING_SNAKE_CASE_: int =fs_config.encoder_layers
SCREAMING_SNAKE_CASE_: List[Any] =fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
SCREAMING_SNAKE_CASE_: int =model.cfg
SCREAMING_SNAKE_CASE_: Union[str, Any] =fs_config.final_dropout
SCREAMING_SNAKE_CASE_: Tuple =fs_config.layerdrop
SCREAMING_SNAKE_CASE_: Dict =fs_config.activation_dropout
SCREAMING_SNAKE_CASE_: Union[str, Any] =fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
SCREAMING_SNAKE_CASE_: str =fs_config.attention_dropout
SCREAMING_SNAKE_CASE_: Any =fs_config.dropout_input
SCREAMING_SNAKE_CASE_: Dict =fs_config.dropout
SCREAMING_SNAKE_CASE_: str =fs_config.mask_channel_length
SCREAMING_SNAKE_CASE_: Any =fs_config.mask_channel_prob
SCREAMING_SNAKE_CASE_: List[Any] =fs_config.mask_length
SCREAMING_SNAKE_CASE_: Dict =fs_config.mask_prob
SCREAMING_SNAKE_CASE_: List[str] ="""Wav2Vec2FeatureExtractor"""
SCREAMING_SNAKE_CASE_: Tuple ="""Wav2Vec2CTCTokenizer"""
return config
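# Note on the config conversion above: fs_config.conv_feature_layers is a string such
# as "[(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2" (an assumed example) that
# eval() turns into a list of (dim, kernel, stride) tuples before it is split into the
# conv_dim / conv_kernel / conv_stride lists.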
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ):
if is_finetuned:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
SCREAMING_SNAKE_CASE_: int =SEWConfig.from_pretrained(lowercase )
else:
SCREAMING_SNAKE_CASE_: Dict =convert_config(model[0] , lowercase )
SCREAMING_SNAKE_CASE_: Any =model[0].eval()
SCREAMING_SNAKE_CASE_: Optional[int] =True if config.feat_extract_norm == """layer""" else False
SCREAMING_SNAKE_CASE_: Optional[int] =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
if is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE_: int =Dictionary.load(lowercase )
            # importantly, change the bos & pad token ids, since the CTC symbol is <pad>
            # and not <s> as in fairseq
SCREAMING_SNAKE_CASE_: Optional[Any] =target_dict.pad_index
SCREAMING_SNAKE_CASE_: Any =target_dict.bos_index
SCREAMING_SNAKE_CASE_: List[str] =target_dict.pad_index
SCREAMING_SNAKE_CASE_: List[str] =target_dict.bos_index
SCREAMING_SNAKE_CASE_: str =target_dict.eos_index
SCREAMING_SNAKE_CASE_: Optional[Any] =len(target_dict.symbols )
SCREAMING_SNAKE_CASE_: int =os.path.join(lowercase , """vocab.json""" )
if not os.path.isdir(lowercase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
with open(lowercase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , lowercase )
SCREAMING_SNAKE_CASE_: Tuple =WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowercase , )
SCREAMING_SNAKE_CASE_: Any =WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =SEWForCTC(lowercase )
else:
SCREAMING_SNAKE_CASE_: int =SEWModel(lowercase )
feature_extractor.save_pretrained(lowercase )
recursively_load_weights(lowercase , lowercase , lowercase )
hf_model.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_UpperCAmelCase = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
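# Example invocation (file names below are placeholders, for illustration only):
#
#     python convert_sew_checkpoint.py \
#         --checkpoint_path ./sew_checkpoint.pt \
#         --pytorch_dump_folder_path ./sew-converted \
#         --config_path ./config.json
#     # add --is_finetuned and --dict_path ./dict.ltr.txt when converting a CTC model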
"""simple docstring"""
import string
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =""""""
for i in sequence:
SCREAMING_SNAKE_CASE_: List[Any] =ord(lowercase )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =string.ascii_letters
SCREAMING_SNAKE_CASE_: Tuple =string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowercase )] if c in letters else c for c in sequence )
def __magic_name__ ( ):
from timeit import timeit
print("""Running performance benchmarks...""" )
SCREAMING_SNAKE_CASE_: int ="""from string import printable ; from __main__ import atbash, atbash_slow"""
print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowercase )} seconds''' )
print(f'''> atbash(): {timeit("atbash(printable)" , setup=lowercase )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
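# The Atbash cipher maps every letter to its mirror in the alphabet (A<->Z, B<->Y, ...),
# so atbash("ABC") returns "ZYX" and applying the cipher twice restores the original
# text; non-letter characters are passed through unchanged.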
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =""""""
for word_or_phrase in separated:
if not isinstance(lowercase , lowercase ):
raise Exception("""join() accepts only strings to be joined""" )
joined += word_or_phrase + separator
return joined.strip(lowercase )
if __name__ == "__main__":
from doctest import testmod
testmod()
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=3 , lowerCAmelCase : Optional[Any]=64 , lowerCAmelCase : Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =np.random.default_rng(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =length
SCREAMING_SNAKE_CASE_: Union[str, Any] =rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_: Tuple =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ) -> str:
'''simple docstring'''
return self.length
def __getitem__( self : Union[str, Any] , lowerCAmelCase : Any ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : str=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Optional[int]=False ) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: Dict =True
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tuple=None ) -> int:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
def __init__( self : Union[str, Any] , lowerCAmelCase : Any=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : List[Any]=False ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[str] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: List[Any] =True
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : int=None ) -> Any:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Optional[int] =False
return x * self.a + self.b
def __magic_name__ ( lowercase , lowercase = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE_: Optional[int] ={"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
SCREAMING_SNAKE_CASE_: Any =load_dataset("""csv""" , data_files=lowercase )
SCREAMING_SNAKE_CASE_: Any =datasets["""train"""].unique("""label""" )
SCREAMING_SNAKE_CASE_: List[Any] ={v: i for i, v in enumerate(lowercase )}
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Dict =tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase , padding="""max_length""" )
if "label" in examples:
SCREAMING_SNAKE_CASE_: Optional[int] =[label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_: List[Any] =datasets.map(
lowercase , batched=lowercase , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(tokenized_datasets["""train"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=2 )
SCREAMING_SNAKE_CASE_: Dict =DataLoader(tokenized_datasets["""validation"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=1 )
return train_dataloader, eval_dataloader
"""simple docstring"""
import math
def __magic_name__ ( lowercase , lowercase ):
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(lowercase ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
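# Worked example of Malus's law as implemented above: for an initial intensity of 100
# and an analyzer angle of 60 degrees, the transmitted intensity is
# 100 * cos(60 degrees) ** 2 = 100 * 0.25 = 25.0.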
"""simple docstring"""
def __magic_name__ ( lowercase ):
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
SCREAMING_SNAKE_CASE_: Tuple =[0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
SCREAMING_SNAKE_CASE_: Any =1
if upper_limit > 0:
SCREAMING_SNAKE_CASE_: List[str] =1
    # Recurrence relation: C(i) = sum(C(j) * C(i-j-1)), for j = 0 to i-1
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
_UpperCAmelCase = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
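# For reference, the first Catalan numbers produced by the recurrence above are
# 1, 1, 2, 5, 14, 42, so catalan_numbers(5) returns [1, 1, 2, 5, 14, 42].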
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Union[str, Any] = 'markuplm'
def __init__( self : str , lowerCAmelCase : int=3_0522 , lowerCAmelCase : Optional[Any]=768 , lowerCAmelCase : Optional[int]=12 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : int=3072 , lowerCAmelCase : Dict="gelu" , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[Any]=512 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[Any]=0.0_2 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Tuple=0 , lowerCAmelCase : Any=0 , lowerCAmelCase : Dict=2 , lowerCAmelCase : List[Any]=256 , lowerCAmelCase : Any=1024 , lowerCAmelCase : str=216 , lowerCAmelCase : str=1001 , lowerCAmelCase : Optional[int]=32 , lowerCAmelCase : Tuple=50 , lowerCAmelCase : List[str]="absolute" , lowerCAmelCase : Any=True , lowerCAmelCase : Optional[int]=None , **lowerCAmelCase : Union[str, Any] , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: Optional[int] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE_: List[str] =num_hidden_layers
SCREAMING_SNAKE_CASE_: Optional[int] =num_attention_heads
SCREAMING_SNAKE_CASE_: List[str] =hidden_act
SCREAMING_SNAKE_CASE_: Union[str, Any] =intermediate_size
SCREAMING_SNAKE_CASE_: Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: str =max_position_embeddings
SCREAMING_SNAKE_CASE_: Optional[Any] =type_vocab_size
SCREAMING_SNAKE_CASE_: Tuple =initializer_range
SCREAMING_SNAKE_CASE_: str =layer_norm_eps
SCREAMING_SNAKE_CASE_: Any =position_embedding_type
SCREAMING_SNAKE_CASE_: Optional[Any] =use_cache
SCREAMING_SNAKE_CASE_: List[Any] =classifier_dropout
# additional properties
SCREAMING_SNAKE_CASE_: List[str] =max_depth
SCREAMING_SNAKE_CASE_: Optional[int] =max_xpath_tag_unit_embeddings
SCREAMING_SNAKE_CASE_: Any =max_xpath_subs_unit_embeddings
SCREAMING_SNAKE_CASE_: Tuple =tag_pad_id
SCREAMING_SNAKE_CASE_: int =subs_pad_id
SCREAMING_SNAKE_CASE_: Any =xpath_unit_hidden_size
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_UpperCAmelCase = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'albert'
def __init__( self : Dict , lowerCAmelCase : List[str]=3_0000 , lowerCAmelCase : List[Any]=128 , lowerCAmelCase : List[str]=4096 , lowerCAmelCase : str=12 , lowerCAmelCase : str=1 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Dict=1_6384 , lowerCAmelCase : int=1 , lowerCAmelCase : str="gelu_new" , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : List[Any]="absolute" , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[int]=3 , **lowerCAmelCase : int , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =embedding_size
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_hidden_groups
SCREAMING_SNAKE_CASE_: List[Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: List[Any] =inner_group_num
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_act
SCREAMING_SNAKE_CASE_: int =intermediate_size
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int =max_position_embeddings
SCREAMING_SNAKE_CASE_: Any =type_vocab_size
SCREAMING_SNAKE_CASE_: int =initializer_range
SCREAMING_SNAKE_CASE_: List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE_: Dict =classifier_dropout_prob
SCREAMING_SNAKE_CASE_: int =position_embedding_type
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
"""simple docstring"""
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_UpperCAmelCase = re.compile(r"""^(?P<major>\d+)""" r"""\.(?P<minor>\d+)""" r"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class a :
UpperCamelCase : Tuple = 4_2
UpperCamelCase : Optional[int] = None
UpperCamelCase : Any = None
UpperCamelCase : Optional[int] = None
UpperCamelCase : List[Any] = None
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =_str_to_version_tuple(self.version_str )
def __repr__( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
    def tuple( self : int ) -> Any:
'''simple docstring'''
return self.major, self.minor, self.patch
    def _validate_operand( self : Any , lowerCAmelCase : List[str] ) -> Dict:
        '''simple docstring'''
        if isinstance(lowerCAmelCase , str ):
            return a(lowerCAmelCase )
        elif isinstance(lowerCAmelCase , a ):
            return lowerCAmelCase
        raise TypeError(f'''{lowerCAmelCase} (type {type(lowerCAmelCase )}) cannot be compared to version.''' )
def __eq__( self : List[str] , lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
try:
            other =self._validate_operand(lowerCAmelCase )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : str , lowerCAmelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
        other =self._validate_operand(lowerCAmelCase )
return self.tuple < other.tuple
def __hash__( self : Tuple ) -> List[str]:
'''simple docstring'''
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def lowerCamelCase__ ( cls : Optional[Any] , lowerCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
        field_names ={f.name for f in dataclasses.fields(cls )}
        return cls(**{k: v for k, v in lowerCAmelCase.items() if k in field_names} )
def lowerCamelCase__ ( self : List[Any] ) -> str:
'''simple docstring'''
return self.version_str
def _str_to_version_tuple( version_str ):
    res =_VERSION_REG.match(version_str )
    if not res:
        raise ValueError(f'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
    return tuple(int(v ) for v in [res.group("""major""" ), res.group("""minor""" ), res.group("""patch""" )] )
def _version_tuple_to_str( version_tuple ):
    return ".".join(str(v ) for v in version_tuple )
| 700
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a ( yaml.SafeLoader ):
def lowerCamelCase__ ( self : int , lowerCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =[self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_: Any =[tuple(lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else key for key in keys]
SCREAMING_SNAKE_CASE_: Dict =Counter(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =[key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=False ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =super().construct_mapping(lowerCAmelCase , deep=lowerCAmelCase )
self._check_no_duplicates_on_constructed_node(lowerCAmelCase )
return mapping
def _split_yaml_from_readme( lowercase ):
    full_content =list(lowercase.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx =full_content[1:].index("""---""" ) + 1
        yamlblock ="""\n""".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class a ( UpperCAmelCase__ ):
# class attributes
UpperCamelCase : Tuple = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def lowerCamelCase__ ( cls : List[Any] , lowerCAmelCase : Path ) -> "DatasetMetadata":
'''simple docstring'''
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
            yaml_string , _ =_split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
else:
return cls()
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Path ) -> List[str]:
'''simple docstring'''
if path.exists():
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_: str =readme_file.read()
else:
SCREAMING_SNAKE_CASE_: str =None
SCREAMING_SNAKE_CASE_: Tuple =self._to_readme(lowerCAmelCase )
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as readme_file:
readme_file.write(lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Optional[str] = None ) -> str:
'''simple docstring'''
        if lowerCAmelCase is not None:
            _ , content =_split_yaml_from_readme(lowerCAmelCase )
            full_content ="""---\n""" + self.to_yaml_string() + """---\n""" + content
        else:
            full_content ="""---\n""" + self.to_yaml_string() + """---\n"""
return full_content
@classmethod
def lowerCamelCase__ ( cls : Optional[int] , lowerCAmelCase : str ) -> "DatasetMetadata":
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =yaml.load(lowerCAmelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_: List[Any] ={
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**lowerCAmelCase )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=lowerCAmelCase , allow_unicode=lowerCAmelCase , encoding="""utf-8""" , ).decode("""utf-8""" )
_UpperCAmelCase = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_UpperCAmelCase = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
_UpperCAmelCase = ap.parse_args()
_UpperCAmelCase = Path(args.readme_filepath)
_UpperCAmelCase = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 36
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Dict = {
"""alibaba-damo/mgp-str-base""": """https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json""",
}
class a ( _snake_case ):
UpperCamelCase : Union[str, Any] = 'mgp-str'
def __init__( self : Union[str, Any] , lowerCAmelCase : str=[32, 128] , lowerCAmelCase : int=4 , lowerCAmelCase : int=3 , lowerCAmelCase : List[str]=27 , lowerCAmelCase : int=38 , lowerCAmelCase : List[Any]=5_0257 , lowerCAmelCase : Any=3_0522 , lowerCAmelCase : Optional[int]=768 , lowerCAmelCase : Optional[int]=12 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : Optional[int]=4.0 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Tuple=False , lowerCAmelCase : Tuple=1E-5 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : int=0.0 , lowerCAmelCase : Dict=False , lowerCAmelCase : Optional[int]=0.0_2 , **lowerCAmelCase : str , ) -> Dict:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_: Optional[int] =image_size
SCREAMING_SNAKE_CASE_: Tuple =patch_size
SCREAMING_SNAKE_CASE_: str =num_channels
SCREAMING_SNAKE_CASE_: List[str] =max_token_length
SCREAMING_SNAKE_CASE_: Optional[int] =num_character_labels
SCREAMING_SNAKE_CASE_: List[str] =num_bpe_labels
SCREAMING_SNAKE_CASE_: Optional[int] =num_wordpiece_labels
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_size
SCREAMING_SNAKE_CASE_: Union[str, Any] =num_hidden_layers
SCREAMING_SNAKE_CASE_: int =num_attention_heads
SCREAMING_SNAKE_CASE_: Union[str, Any] =mlp_ratio
SCREAMING_SNAKE_CASE_: Optional[Any] =distilled
SCREAMING_SNAKE_CASE_: Optional[int] =layer_norm_eps
SCREAMING_SNAKE_CASE_: Optional[Any] =drop_rate
SCREAMING_SNAKE_CASE_: Optional[int] =qkv_bias
SCREAMING_SNAKE_CASE_: Optional[Any] =attn_drop_rate
SCREAMING_SNAKE_CASE_: List[Any] =drop_path_rate
SCREAMING_SNAKE_CASE_: Optional[int] =output_aa_attentions
SCREAMING_SNAKE_CASE_: Tuple =initializer_range
| 701
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling( data ):
    return (data["data"], data["target"])
def xgboost( features , target ):
    classifier =XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main( ):
    iris =load_iris()
    features , targets =data_handling(iris )
    x_train , x_test , y_train , y_test =train_test_split(
        features , targets , test_size=0.25 )
    names =iris["""target_names"""]
    # Create an XGBoost Classifier from the training data
    xgb_classifier =xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgb_classifier , x_test , y_test , display_labels=names , cmap="""Blues""" , normalize="""true""" , )
    plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 36
| 0
|
"""simple docstring"""
from collections.abc import Sequence
def max_subsequence_sum( nums = None ):
    if nums is None or not nums:
        raise ValueError("""Input sequence should not be empty""" )
    ans =nums[0]
    for i in range(1 , len(nums ) ):
        num =nums[i]
        ans =max(ans , ans + num , num )
    return ans
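# Illustrative: max_subsequence_sum([1, 2, 3, -2, 5]) == 11, the sum of the positive elements.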
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
_UpperCAmelCase = int(input("""Enter number of elements : """).strip())
_UpperCAmelCase = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
| 702
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
SCREAMING_SNAKE_CASE_: List[str] =[]
SCREAMING_SNAKE_CASE_: Any =[]
for rt in rc.restypes:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
SCREAMING_SNAKE_CASE_: Any ={name: i for i, name in enumerate(lowercase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor(
lowercase , dtype=torch.floataa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Any =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: Tuple =residx_atomaa_mask
SCREAMING_SNAKE_CASE_: Dict =residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
SCREAMING_SNAKE_CASE_: Dict =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Optional[int] =residx_atomaa_to_atomaa.long()
# create the corresponding mask
SCREAMING_SNAKE_CASE_: Optional[int] =torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
SCREAMING_SNAKE_CASE_: int =rc.restype_atoa[restype_letter]
SCREAMING_SNAKE_CASE_: Any =rc.residue_atoms[restype_name]
for atom_name in atom_names:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.atom_order[atom_name]
SCREAMING_SNAKE_CASE_: Dict =1
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: List[Any] =residx_atomaa_mask
return protein
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =tree_map(lambda lowercase : torch.tensor(lowercase , device=batch["""aatype"""].device ) , lowercase , np.ndarray )
SCREAMING_SNAKE_CASE_: int =tensor_tree_map(lambda lowercase : np.array(lowercase ) , make_atomaa_masks(lowercase ) )
return out
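# Note: the two helpers above build index/mask tensors that translate between the packed 14-atom
# and the full 37-atom per-residue atom representations used by AlphaFold-style models; the second
# helper applies the first to a numpy batch and converts the resulting tensors back to numpy arrays.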
| 36
| 0
|
def __magic_name__ ( a , b ):
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    a_binary =str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary =str(bin(b ) )[2:]  # remove the leading "0b"
    max_len =max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
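# Illustrative: for a=25 (0b11001) and b=32 (0b100000) the bitwise XOR above returns "0b111001".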
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def create_inputs( input_types ):
    inputs =[]
    for input_type in input_types:
        if input_type == "text":
            inputs.append("""Text input""" )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(f'''Invalid type requested: {input_type}''' )
    return inputs
def output_types( outputs ):
    output_types =[]
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append("""text""" )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append("""image""" )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append("""audio""" )
        else:
            raise ValueError(f'''Invalid output: {output}''' )
return output_types
@is_tool_test
class a :
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] =self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
SCREAMING_SNAKE_CASE_: Any =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: List[Any] =self.tool(*lowerCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
SCREAMING_SNAKE_CASE_: str =[outputs]
self.assertListEqual(output_types(lowerCAmelCase ) , self.tool.outputs )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Tuple =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase , self.tool.outputs ):
SCREAMING_SNAKE_CASE_: int =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for _input, input_type in zip(lowerCAmelCase , self.tool.inputs ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
SCREAMING_SNAKE_CASE_: Dict =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
| 36
| 0
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict( checkpoint_path ):
    sd =torch.load(checkpoint_path , map_location="""cpu""" )
    return sd
def get_new_dict( d , config , rename_keys_prefix=rename_keys_prefix ):
    new_d =OrderedDict()
    new_d["""visual_bert.embeddings.position_ids"""] =torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key =key
        for name_pair in rename_keys_prefix:
            new_key =new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] =d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["""cls.predictions.decoder.bias"""] =new_d["""cls.predictions.bias"""]
    return new_d
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase ):
assert (
checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
# Get Config
if "pre" in checkpoint_path:
SCREAMING_SNAKE_CASE_: Union[str, Any] ='pretraining'
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE_: Optional[int] ={'visual_embedding_dim': 512}
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE_: int ={'visual_embedding_dim': 2048}
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE_: Dict ={'visual_embedding_dim': 2048}
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE_: Any ={'visual_embedding_dim': 1024}
else:
raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
else:
if "vcr" in checkpoint_path:
SCREAMING_SNAKE_CASE_: Optional[Any] ={'visual_embedding_dim': 512}
SCREAMING_SNAKE_CASE_: Optional[Any] ='multichoice'
elif "vqa_advanced" in checkpoint_path:
SCREAMING_SNAKE_CASE_: List[str] ={'visual_embedding_dim': 2048}
SCREAMING_SNAKE_CASE_: Optional[Any] ='vqa_advanced'
elif "vqa" in checkpoint_path:
SCREAMING_SNAKE_CASE_: List[Any] ={'visual_embedding_dim': 2048, 'num_labels': 3129}
SCREAMING_SNAKE_CASE_: Any ='vqa'
elif "nlvr" in checkpoint_path:
SCREAMING_SNAKE_CASE_: Dict ={
'visual_embedding_dim': 1024,
'num_labels': 2,
}
SCREAMING_SNAKE_CASE_: Dict ='nlvr'
SCREAMING_SNAKE_CASE_: Optional[int] =VisualBertConfig(**_lowercase )
# Load State Dict
SCREAMING_SNAKE_CASE_: Tuple =load_state_dict(_lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =get_new_dict(_lowercase , _lowercase )
if model_type == "pretraining":
SCREAMING_SNAKE_CASE_: Optional[int] =VisualBertForPreTraining(_lowercase )
elif model_type == "vqa":
SCREAMING_SNAKE_CASE_: int =VisualBertForQuestionAnswering(_lowercase )
elif model_type == "nlvr":
SCREAMING_SNAKE_CASE_: List[Any] =VisualBertForVisualReasoning(_lowercase )
elif model_type == "multichoice":
SCREAMING_SNAKE_CASE_: Dict =VisualBertForMultipleChoice(_lowercase )
model.load_state_dict(_lowercase )
# Save Checkpoints
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
_UpperCAmelCase = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 704
|
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays( numsa , numsb ):
    all_numbers =sorted(numsa + numsb )
    div , mod =divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
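# Illustrative: median_of_two_arrays([1, 3], [2]) == 2 and median_of_two_arrays([1, 2], [3, 4]) == 2.5.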
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of first array: """).split()]
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of second array: """).split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 36
| 0
|
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_UpperCAmelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_UpperCAmelCase = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_UpperCAmelCase = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
'''simple docstring'''
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def lowerCamelCase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[Any]="uniform_average" , lowerCAmelCase : Any=True ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =mean_squared_error(
__lowerCamelCase , __lowerCamelCase , sample_weight=__lowerCamelCase , multioutput=__lowerCamelCase , squared=__lowerCamelCase )
return {"mse": mse}
| 705
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str]=13 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=224 , lowerCAmelCase : List[str]=1000 , lowerCAmelCase : Optional[Any]=[3, 3, 6, 4] , lowerCAmelCase : int=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =parent
SCREAMING_SNAKE_CASE_: Any =batch_size
SCREAMING_SNAKE_CASE_: Tuple =num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] =is_training
SCREAMING_SNAKE_CASE_: Tuple =use_labels
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =num_labels
SCREAMING_SNAKE_CASE_: int =image_size
SCREAMING_SNAKE_CASE_: Optional[Any] =layer_depths
SCREAMING_SNAKE_CASE_: List[Any] =embed_dims
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_: Tuple =self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1E-5 , )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Any =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.num_labels
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE_: int =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): str =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_: Tuple ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[int] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCamelCase : Tuple = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Any = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Dict = False
UpperCamelCase : List[str] = False
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Any =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Tuple =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[Any] =SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_: Optional[Any] =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[str] =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Dict =outputs.hidden_states
SCREAMING_SNAKE_CASE_: List[Any] =8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Any =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
def _config_zero_init(lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_: Dict =copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1E-10 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =_config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: List[Any] =_config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: int =prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 36
| 0
|
"""simple docstring"""
import functools
from typing import Any
def __magic_name__ ( string , words ):
    # Validation
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError("""the string should be not empty string""" )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError("""the words should be a list of non-empty strings""" )
    # Build trie
    trie: dict[str, Any] ={}
    word_keeper_key ='WORD_KEEPER'
    for word in words:
        trie_node =trie
        for c in word:
            if c not in trie_node:
                trie_node[c] ={}
            trie_node =trie_node[c]
        trie_node[word_keeper_key] =True
    len_string =len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index ) -> bool:
        if index == len_string:
            return True
        trie_node =trie
        for i in range(index , len_string ):
            trie_node =trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
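# Illustrative: the word-break check above returns True for ("applepenapple", ["apple", "pen"])
# and False for ("catsandog", ["cats", "dog", "sand", "and", "cat"]).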
if __name__ == "__main__":
import doctest
doctest.testmod()
| 706
|
"""simple docstring"""
from math import pi
def arc_length( angle , radius ):
    return 2 * pi * radius * (angle / 360)
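# Illustrative: arc_length(90, 10) is a quarter of the circumference of a radius-10 circle,
# i.e. 2 * pi * 10 / 4, about 15.71.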
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
| 36
| 0
|
"""simple docstring"""
import math
def real_power( apparent_power , power_factor ):
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
    return apparent_power * power_factor
def reactive_power( apparent_power , power_factor ):
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
    return apparent_power * math.sqrt(1 - power_factor**2 )
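# Illustrative: for an apparent power of 100 VA at power factor 0.8, real_power gives 80 W and
# reactive_power gives 100 * sqrt(1 - 0.8 ** 2) = 60 VAR.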
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Any =jax.device_count()
SCREAMING_SNAKE_CASE_: Dict =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Dict =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int ="""stabilityai/stable-diffusion-2"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Optional[int] =scheduler_params
SCREAMING_SNAKE_CASE_: Tuple ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.device_count()
SCREAMING_SNAKE_CASE_: Optional[Any] =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Any =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: str =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Any =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 36
| 0
|
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class a ( __A ):
def __init__( self : Tuple , lowerCAmelCase : Optional[NestedDataStructureLike[PathLike]] = None , lowerCAmelCase : Optional[NamedSplit] = None , lowerCAmelCase : Optional[Features] = None , lowerCAmelCase : str = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[int] = None , **lowerCAmelCase : Dict , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =path_or_paths
SCREAMING_SNAKE_CASE_: int =split if split or isinstance(lowerCAmelCase , lowerCAmelCase ) else '''train'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =features
SCREAMING_SNAKE_CASE_: List[Any] =cache_dir
SCREAMING_SNAKE_CASE_: Optional[int] =keep_in_memory
SCREAMING_SNAKE_CASE_: Tuple =streaming
SCREAMING_SNAKE_CASE_: Dict =num_proc
SCREAMING_SNAKE_CASE_: Optional[Any] =kwargs
@abstractmethod
def lowerCamelCase__ ( self : List[str] ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
'''simple docstring'''
pass
class a ( __A ):
def __init__( self : Dict , lowerCAmelCase : Optional[Features] = None , lowerCAmelCase : str = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[int] = None , **lowerCAmelCase : List[Any] , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =features
SCREAMING_SNAKE_CASE_: Any =cache_dir
SCREAMING_SNAKE_CASE_: Union[str, Any] =keep_in_memory
SCREAMING_SNAKE_CASE_: int =streaming
SCREAMING_SNAKE_CASE_: Dict =num_proc
SCREAMING_SNAKE_CASE_: Optional[int] =kwargs
@abstractmethod
def lowerCamelCase__ ( self : List[Any] ) -> Union[Dataset, IterableDataset]:
'''simple docstring'''
pass
| 708
|
"""simple docstring"""
def solution( n = 200_0000 ):
    primality_list =[0 for i in range(n + 1 )]
    primality_list[0] =1
    primality_list[1] =1
    for i in range(2 , int(n**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
                primality_list[j] =1
    sum_of_primes =0
    for i in range(n ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
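# Illustrative: solution(10) == 17, the sum of the primes 2, 3, 5 and 7 below ten.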
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Dict = 'bloom'
UpperCamelCase : Any = ['past_key_values']
UpperCamelCase : List[Any] = {
'num_hidden_layers': 'n_layer',
'num_attention_heads': 'n_head',
}
def __init__( self : Dict , lowerCAmelCase : int=25_0880 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : str=8 , lowerCAmelCase : Union[str, Any]=1E-5 , lowerCAmelCase : str=0.0_2 , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Union[str, Any]=1 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : int=False , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Any=1 , lowerCAmelCase : Optional[int]=False , **lowerCAmelCase : Dict , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =vocab_size
# Backward compatibility with n_embed kwarg
SCREAMING_SNAKE_CASE_: Tuple =kwargs.pop("""n_embed""" , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =hidden_size if n_embed is None else n_embed
SCREAMING_SNAKE_CASE_: Optional[Any] =n_layer
SCREAMING_SNAKE_CASE_: Optional[int] =n_head
SCREAMING_SNAKE_CASE_: Optional[int] =layer_norm_epsilon
SCREAMING_SNAKE_CASE_: Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE_: Optional[Any] =use_cache
SCREAMING_SNAKE_CASE_: Optional[Any] =pretraining_tp
SCREAMING_SNAKE_CASE_: List[str] =apply_residual_connection_post_layernorm
SCREAMING_SNAKE_CASE_: List[Any] =hidden_dropout
SCREAMING_SNAKE_CASE_: Optional[Any] =attention_dropout
SCREAMING_SNAKE_CASE_: str =bos_token_id
SCREAMING_SNAKE_CASE_: Dict =eos_token_id
SCREAMING_SNAKE_CASE_: int =slow_but_exact
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
class a ( UpperCAmelCase__ ):
UpperCamelCase : List[Any] = version.parse('1.12' )
def __init__( self : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int = "default" , lowerCAmelCase : Dict = None , lowerCAmelCase : Optional[Any] = False , ) -> int:
'''simple docstring'''
super().__init__(_lowerCAmelCase , task=_lowerCAmelCase , patching_specs=_lowerCAmelCase , use_past=_lowerCAmelCase )
if not getattr(self._config , """pad_token_id""" , _lowerCAmelCase ):
# TODO: how to do that better?
SCREAMING_SNAKE_CASE_: Optional[Any] =0
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(_lowerCAmelCase , direction="""inputs""" , inverted_values_shape=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int ={0: """batch""", 1: """past_sequence + sequence"""}
else:
SCREAMING_SNAKE_CASE_: Any ={0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
return self._config.n_layer
@property
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return self._config.n_head
@property
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
return 1E-3
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str] = -1 , lowerCAmelCase : Union[str, Any] = -1 , lowerCAmelCase : str = False , lowerCAmelCase : Union[str, Any] = None , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =super(_lowerCAmelCase , self ).generate_dummy_inputs(
_lowerCAmelCase , batch_size=_lowerCAmelCase , seq_length=_lowerCAmelCase , is_pair=_lowerCAmelCase , framework=_lowerCAmelCase )
# We need to order the input in the way they appears in the forward()
SCREAMING_SNAKE_CASE_: List[Any] =OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE_: Dict =seqlen + 2
SCREAMING_SNAKE_CASE_: Optional[int] =self._config.hidden_size // self.num_attention_heads
SCREAMING_SNAKE_CASE_: Optional[Any] =(
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
SCREAMING_SNAKE_CASE_: Union[str, Any] =(
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
SCREAMING_SNAKE_CASE_: List[str] =[
(torch.zeros(_lowerCAmelCase ), torch.zeros(_lowerCAmelCase )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE_: int =common_inputs["""attention_mask"""]
if self.use_past:
SCREAMING_SNAKE_CASE_: int =ordered_inputs["""attention_mask"""].dtype
SCREAMING_SNAKE_CASE_: Optional[int] =torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(_lowerCAmelCase , _lowerCAmelCase , dtype=_lowerCAmelCase )] , dim=1 )
return ordered_inputs
@property
def lowerCamelCase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
return 13
| 709
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 36
| 0
|
"""simple docstring"""
def actual_power( a , b ):
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
    else:
        return a * actual_power(a , int(b / 2 ) ) * actual_power(a , int(b / 2 ) )
def power( a , b ):
    if b < 0:
        return 1 / actual_power(a , b )
    return actual_power(a , b )
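# Illustrative: power(-2, -3) == 1 / ((-2) ** 3) == -0.125, which is what the call below prints.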
if __name__ == "__main__":
print(power(-2, -3))
| 710
|
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only when both inputs are 1 (logical AND on 0/1 inputs)."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 36
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
A = TypeVar("""T""")
A = TypeVar("""U""")
class a ( Generic[T, U] ):
def __init__( self : Any , lowerCAmelCase : T | None , lowerCAmelCase : U | None ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =key
SCREAMING_SNAKE_CASE_: int =val
SCREAMING_SNAKE_CASE_: DoubleLinkedListNode[T, U] | None =None
SCREAMING_SNAKE_CASE_: DoubleLinkedListNode[T, U] | None =None
def __repr__( self : int ) -> Optional[int]:
'''simple docstring'''
return (
f'''Node: key: {self.key}, val: {self.val}, '''
f'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class a ( Generic[T, U] ):
def __init__( self : Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: DoubleLinkedListNode[T, U] =DoubleLinkedListNode(__A , __A )
SCREAMING_SNAKE_CASE_: DoubleLinkedListNode[T, U] =DoubleLinkedListNode(__A , __A )
SCREAMING_SNAKE_CASE_: List[str] =self.rear, self.head
def __repr__( self : Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =["DoubleLinkedList"]
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.head
while node.next is not None:
rep.append(str(__A ) )
SCREAMING_SNAKE_CASE_: Optional[Any] =node.next
rep.append(str(self.rear ) )
return ",\n ".join(__A )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : DoubleLinkedListNode[T, U] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
SCREAMING_SNAKE_CASE_: Dict =node
SCREAMING_SNAKE_CASE_: Optional[Any] =previous
SCREAMING_SNAKE_CASE_: int =node
SCREAMING_SNAKE_CASE_: int =self.rear
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : DoubleLinkedListNode[T, U] ) -> Optional[Any]:
'''simple docstring'''
if node.prev is None or node.next is None:
return None
SCREAMING_SNAKE_CASE_: List[Any] =node.next
SCREAMING_SNAKE_CASE_: Any =node.prev
SCREAMING_SNAKE_CASE_: Any =None
SCREAMING_SNAKE_CASE_: Optional[Any] =None
return node
class a ( Generic[T, U] ):
UpperCamelCase : Optional[Any] = {}
def __init__( self : str , lowerCAmelCase : int ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: DoubleLinkedList[T, U] =DoubleLinkedList()
SCREAMING_SNAKE_CASE_: Dict =capacity
SCREAMING_SNAKE_CASE_: List[Any] =0
SCREAMING_SNAKE_CASE_: int =0
SCREAMING_SNAKE_CASE_: Optional[int] =0
SCREAMING_SNAKE_CASE_: dict[T, DoubleLinkedListNode[T, U]] ={}
def __repr__( self : Any ) -> Any:
'''simple docstring'''
return (
f'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
f'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self : Union[str, Any] , lowerCAmelCase : T ) -> Any:
'''simple docstring'''
return key in self.cache
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : T ) -> List[Any]:
'''simple docstring'''
if key in self.cache:
self.hits += 1
SCREAMING_SNAKE_CASE_: DoubleLinkedListNode[T, U] =self.cache[key]
SCREAMING_SNAKE_CASE_: int =self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(__A )
return node.val
self.miss += 1
return None
def lowerCamelCase__ ( self : int , lowerCAmelCase : T , lowerCAmelCase : U ) -> str:
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
SCREAMING_SNAKE_CASE_: Dict =self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(__A ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
SCREAMING_SNAKE_CASE_: Optional[Any] =DoubleLinkedListNode(__A , __A )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
SCREAMING_SNAKE_CASE_: List[Any] =value
self.list.add(__A )
@classmethod
def lowerCamelCase__ ( cls : Tuple , lowerCAmelCase : int = 128 ) -> str:
'''simple docstring'''
def cache_decorator_inner(lowerCAmelCase : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*lowerCAmelCase : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
SCREAMING_SNAKE_CASE_: Tuple =LRUCache(__A )
SCREAMING_SNAKE_CASE_: str =cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
SCREAMING_SNAKE_CASE_: Optional[int] =func(*__A )
cls.decorator_function_to_instance_map[func].put(args[0] , __A )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(__A , """cache_info""" , __A ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def __magic_name__ ( lowercase , lowercase , lowercase ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""input_conv.weight_g"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[str] =checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: Union[str, Any] =checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[Any] =checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""output_conv.1.weight_g"""]
SCREAMING_SNAKE_CASE_: List[str] =checkpoint["""output_conv.1.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE_: List[Any] =SpeechTaHifiGanConfig.from_pretrained(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] =SpeechTaHifiGan(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch.load(lowercase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =np.load(lowercase )
SCREAMING_SNAKE_CASE_: Any =stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_: str =stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
model.save_pretrained(lowercase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
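    # Example invocation (a sketch -- the script filename and all paths below are
    # placeholders, not values taken from this file):
    #
    #   python convert_hifigan_checkpoint.py \
    #       --checkpoint_path /path/to/original_generator_checkpoint.pt \
    #       --stats_path /path/to/stats.npy \
    #       --pytorch_dump_folder_path ./speecht5_hifigan \
    #       --config_path ./config.json
    #
    # The stats file is expected to contain two flattened arrays (stats[0] and stats[1]
    # above), presumably the normalization statistics loaded before the model is saved.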
| 36
| 0
|
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    # Two handles on the same lock file: the second one must time out while the first holds the lock.
    lock_a = FileLock(str(tmpdir / "foo.lock"))
    lock_b = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock_a.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock_b.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_lock_filenames_are_truncated(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock_a = FileLock(str(tmpdir / filename))
    assert lock_a._lock_file.endswith(".lock")
    assert not lock_a._lock_file.endswith(filename)
    assert len(os.path.basename(lock_a._lock_file)) <= 255
    lock_b = FileLock(tmpdir / filename)
    with lock_a.acquire():
        with pytest.raises(Timeout):
            lock_b.acquire(0)
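# A minimal usage sketch of the lock API exercised above (illustrative only; the
# function name is invented): acquire() is used as a context manager, and a
# competing handle on the same lock file raises Timeout when its timeout expires.
def _filelock_usage_sketch(tmpdir) -> None:
    lock = FileLock(str(tmpdir / "example.lock"))
    with lock.acquire(timeout=1):
        pass  # the lock file is held inside this block
    # once the context exits, the lock can be acquired again
    with lock.acquire(timeout=1):
        pass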
| 712
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __magic_name__ ( lowercase ):
if "cls_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def __magic_name__ ( lowercase , lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Optional[int] =orig_state_dict.pop(lowercase )
if "qkv" in key:
SCREAMING_SNAKE_CASE_: Dict =key.split(""".""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE_: int =config.decoder_hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] ="""decoder.decoder_layers."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Dict =val[:dim, :]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: str =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: List[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: List[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Any =config.hidden_size
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""vit.encoder.layer."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim, :]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: Dict =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Any =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Tuple =val
return orig_state_dict
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: List[Any] =1024
SCREAMING_SNAKE_CASE_: Dict =4096
SCREAMING_SNAKE_CASE_: Tuple =24
SCREAMING_SNAKE_CASE_: int =16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Union[str, Any] =14
SCREAMING_SNAKE_CASE_: Any =1280
SCREAMING_SNAKE_CASE_: Dict =5120
SCREAMING_SNAKE_CASE_: Optional[int] =32
SCREAMING_SNAKE_CASE_: Optional[Any] =16
SCREAMING_SNAKE_CASE_: Tuple =ViTMAEForPreTraining(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""model"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: str =convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple ="""https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
SCREAMING_SNAKE_CASE_: List[Any] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
SCREAMING_SNAKE_CASE_: int =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: int =image_processor(images=lowercase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Dict =torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
SCREAMING_SNAKE_CASE_: Any =torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
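    # Example invocation (a sketch -- the script filename is an assumption; the URL is
    # the default declared above):
    #
    #   python convert_vit_mae_to_pytorch.py \
    #       --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
    #       --pytorch_dump_folder_path ./vit-mae-base
    #
    # URLs containing "large" or "huge" switch to the larger configurations handled in
    # convert_vit_mae_checkpoint above.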
| 36
| 0
|
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
_UpperCAmelCase = TypeVar("""T""")
def get_parent_position(position: int) -> int:
    # heap helper: index of the parent of the node stored at ``position``
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    # heap helper: index of the left child of the node stored at ``position``
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    # heap helper: index of the right child of the node stored at ``position``
    return (2 * position) + 2
class a ( Generic[T] ):
def __init__( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
SCREAMING_SNAKE_CASE_: int ={}
SCREAMING_SNAKE_CASE_: List[str] =0
def __len__( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
return self.elements
def __repr__( self : Dict ) -> Any:
'''simple docstring'''
return str(self.heap )
def lowerCamelCase__ ( self : int ) -> str:
'''simple docstring'''
return self.elements == 0
def lowerCamelCase__ ( self : int , lowerCAmelCase : T , lowerCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
self.heap.append((elem, weight) )
SCREAMING_SNAKE_CASE_: str =self.elements
self.elements += 1
self._bubble_up(lowerCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =self.heap[0]
self._bubble_down(lowerCAmelCase )
return elem
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : T , lowerCAmelCase : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =self.position_map[elem]
SCREAMING_SNAKE_CASE_: str =(elem, weight)
if position > 0:
SCREAMING_SNAKE_CASE_: Any =get_parent_position(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(lowerCAmelCase )
else:
self._bubble_down(lowerCAmelCase )
else:
self._bubble_down(lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : T ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.position_map[elem]
if curr_pos == 0:
return None
SCREAMING_SNAKE_CASE_: List[Any] =get_parent_position(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =self.heap[curr_pos]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(lowerCAmelCase , lowerCAmelCase )
return self._bubble_up(lowerCAmelCase )
return None
def lowerCamelCase__ ( self : int , lowerCAmelCase : T ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =self.position_map[elem]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =self.heap[curr_pos]
SCREAMING_SNAKE_CASE_: List[Any] =get_child_left_position(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =get_child_right_position(lowerCAmelCase )
if child_left_position < self.elements and child_right_position < self.elements:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =self.heap[child_left_position]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(lowerCAmelCase , lowerCAmelCase )
return self._bubble_down(lowerCAmelCase )
if child_left_position < self.elements:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(lowerCAmelCase , lowerCAmelCase )
return self._bubble_down(lowerCAmelCase )
else:
return None
if child_right_position < self.elements:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(lowerCAmelCase , lowerCAmelCase )
return self._bubble_down(lowerCAmelCase )
return None
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : int , lowerCAmelCase : int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =self.heap[nodea_pos][0]
SCREAMING_SNAKE_CASE_: List[str] =self.heap[nodea_pos][0]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =(
self.heap[nodea_pos],
self.heap[nodea_pos],
)
SCREAMING_SNAKE_CASE_: str =nodea_pos
SCREAMING_SNAKE_CASE_: List[str] =nodea_pos
class a ( Generic[T] ):
def __init__( self : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] ={}
SCREAMING_SNAKE_CASE_: Dict =0
def __repr__( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return str(self.connections )
def __len__( self : List[str] ) -> int:
'''simple docstring'''
return self.nodes
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : T ) -> Tuple:
'''simple docstring'''
if node not in self.connections:
SCREAMING_SNAKE_CASE_: Dict ={}
self.nodes += 1
def lowerCamelCase__ ( self : str , lowerCAmelCase : T , lowerCAmelCase : T , lowerCAmelCase : int ) -> List[str]:
'''simple docstring'''
self.add_node(lowerCAmelCase )
self.add_node(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =weight
SCREAMING_SNAKE_CASE_: Dict =weight
def __magic_name__ ( lowercase , ):
SCREAMING_SNAKE_CASE_: Any ={node: maxsize for node in graph.connections}
SCREAMING_SNAKE_CASE_: str ={node: None for node in graph.connections}
SCREAMING_SNAKE_CASE_: Dict =MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__UpperCAmelCase , __UpperCAmelCase )
if priority_queue.is_empty():
return dist, parent
# initialization
SCREAMING_SNAKE_CASE_: Union[str, Any] =priority_queue.extract_min()
SCREAMING_SNAKE_CASE_: Optional[Any] =0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
SCREAMING_SNAKE_CASE_: List[Any] =dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__UpperCAmelCase , dist[neighbour] )
SCREAMING_SNAKE_CASE_: Dict =node
# running prim's algorithm
while not priority_queue.is_empty():
SCREAMING_SNAKE_CASE_: Union[str, Any] =priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
SCREAMING_SNAKE_CASE_: int =dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__UpperCAmelCase , dist[neighbour] )
SCREAMING_SNAKE_CASE_: List[Any] =node
return dist, parent
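# A compact, self-contained reference for the same idea (illustrative only; it does
# not use the priority-queue class above and works on a plain adjacency dict): lazy
# Prim's algorithm with the standard-library heap.
def _prims_reference_sketch(adj: dict) -> dict:
    """Return a parent map describing a minimum spanning tree of ``adj``.

    ``adj`` maps each node to a dict of ``neighbour -> edge weight``.
    """
    import heapq

    start = next(iter(adj))
    parent = {start: None}
    # heap entries are (edge weight into the tree, node, parent candidate)
    heap = [(0, start, None)]
    visited = set()
    while heap:
        weight, node, via = heapq.heappop(heap)
        if node in visited:
            continue
        visited.add(node)
        parent[node] = via
        for neighbour, edge_weight in adj[node].items():
            if neighbour not in visited:
                heapq.heappush(heap, (edge_weight, neighbour, node))
    return parent


# Example: _prims_reference_sketch({"a": {"b": 1, "c": 4}, "b": {"a": 1, "c": 2}, "c": {"a": 4, "b": 2}})
# returns {"a": None, "b": "a", "c": "b"}.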
| 713
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
| 0
|
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_UpperCAmelCase = datasets.logging.get_logger(__name__)
_UpperCAmelCase = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_UpperCAmelCase = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4   Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contains the placeholder [WORD], which gets replaced by the actual token from the Treebank that is part of the OntoNotes release.
5 Part-of-Speech
6   Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
_UpperCAmelCase = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def __magic_name__ ( lowercase , lowercase , lowercase=False , lowercase=False , lowercase=True , lowercase=False , lowercase="dummy_doc" ):
SCREAMING_SNAKE_CASE_: Any ={doc: key_lines}
SCREAMING_SNAKE_CASE_: str ={doc: sys_lines}
SCREAMING_SNAKE_CASE_: List[Any] ={}
SCREAMING_SNAKE_CASE_: str =0
SCREAMING_SNAKE_CASE_: List[Any] =0
SCREAMING_SNAKE_CASE_: Tuple =0
SCREAMING_SNAKE_CASE_: Optional[int] =0
SCREAMING_SNAKE_CASE_: Optional[Any] =0
SCREAMING_SNAKE_CASE_: Tuple =0
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =reader.get_doc_mentions(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
SCREAMING_SNAKE_CASE_: Union[str, Any] =reader.set_annotated_parse_trees(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =reader.get_doc_mentions(lowerCAmelCase__ , sys_doc_lines[doc] , lowerCAmelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
SCREAMING_SNAKE_CASE_: Tuple =reader.set_annotated_parse_trees(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ , lowerCAmelCase__ )
if remove_nested:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =reader.remove_nested_coref_mentions(lowerCAmelCase__ , lowerCAmelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =reader.remove_nested_coref_mentions(lowerCAmelCase__ , lowerCAmelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
SCREAMING_SNAKE_CASE_: List[Any] =reader.get_mention_assignments(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_: int =reader.get_mention_assignments(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_: Dict =(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
f'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''' )
logger.info(
"""Number of resulting singleton clusters in the key """
f'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''' )
if not keep_singletons:
logger.info(
f'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
"""files, respectively""" )
return doc_coref_infos
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_coref_infos(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_: Tuple ={}
SCREAMING_SNAKE_CASE_: Optional[Any] =0
SCREAMING_SNAKE_CASE_: Any =0
for name, metric in metrics:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =evaluator.evaluate_documents(lowerCAmelCase__ , lowerCAmelCase__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f'''{name}/recall''': recall, f'''{name}/precision''': precision, f'''{name}/f1''': fa} )
logger.info(
name.ljust(10 ) , f'''Recall: {recall * 100:.2f}''' , f''' Precision: {precision * 100:.2f}''' , f''' F1: {fa * 100:.2f}''' , )
if conll_subparts_num == 3:
SCREAMING_SNAKE_CASE_: Tuple =(conll / 3) * 100
logger.info(f'''CoNLL score: {conll:.2f}''' )
output_scores.update({"""conll_score""": conll} )
return output_scores
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
SCREAMING_SNAKE_CASE_: List[Any] =line.split()[5]
if not parse_col == "-":
SCREAMING_SNAKE_CASE_: Optional[Any] =True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : str=True , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : Dict=False ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =[
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
SCREAMING_SNAKE_CASE_: Dict =util.check_gold_parse_annotation(_UpperCAmelCase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
SCREAMING_SNAKE_CASE_: int =evaluate(
key_lines=_UpperCAmelCase , sys_lines=_UpperCAmelCase , metrics=_UpperCAmelCase , NP_only=_UpperCAmelCase , remove_nested=_UpperCAmelCase , keep_singletons=_UpperCAmelCase , min_span=_UpperCAmelCase , )
return score
| 714
|
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    """Brick (odd-even) sort: alternately compare-and-swap even-index and odd-index pairs."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]  # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
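    # Illustrative checks (not part of the original script): a small shuffled list
    # and the empty list are both handled correctly by the brick sort above.
    assert odd_even_sort([3, 1, 2]) == [1, 2, 3]
    assert odd_even_sort([]) == []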
| 36
| 0
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class a ( __lowerCAmelCase , unittest.TestCase ):
UpperCamelCase : Optional[Any] = XLMRobertaTokenizer
UpperCamelCase : Optional[Any] = XLMRobertaTokenizerFast
UpperCamelCase : int = True
UpperCamelCase : Tuple = True
def lowerCamelCase__ ( self : int ) -> str:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_: Union[str, Any] =XLMRobertaTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] ="""<pad>"""
SCREAMING_SNAKE_CASE_: Optional[int] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(_UpperCamelCase ) , 1002 )
def lowerCamelCase__ ( self : Any ) -> str:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =XLMRobertaTokenizer(_UpperCamelCase , keep_accents=_UpperCamelCase )
SCREAMING_SNAKE_CASE_: Any =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: List[Any] =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
SCREAMING_SNAKE_CASE_: Optional[Any] =tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
SCREAMING_SNAKE_CASE_: Any =tokenizer.convert_ids_to_tokens(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE_: Tuple =(self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE_: Dict =self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE_: str =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer_r.save_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer_p.save_pretrained(_UpperCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE_: Optional[int] =tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(_UpperCamelCase , _UpperCamelCase )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_: str =tokenizer_r.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer_p.from_pretrained(_UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_UpperCamelCase )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE_: Union[str, Any] =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: Dict =tokenizer_r.save_pretrained(_UpperCamelCase , legacy_format=_UpperCamelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer_p.save_pretrained(_UpperCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(_UpperCamelCase , _UpperCamelCase )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer_r.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer_p.from_pretrained(_UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) )
shutil.rmtree(_UpperCamelCase )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE_: Dict =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer_r.save_pretrained(_UpperCamelCase , legacy_format=_UpperCamelCase )
SCREAMING_SNAKE_CASE_: Any =tokenizer_p.save_pretrained(_UpperCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_: List[str] =tokenizer_r.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE_: Dict =tokenizer_p.from_pretrained(_UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) )
shutil.rmtree(_UpperCamelCase )
@cached_property
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def lowerCamelCase__ ( self : int ) -> int:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_UpperCamelCase , f.name )
SCREAMING_SNAKE_CASE_: Tuple =XLMRobertaTokenizer(f.name , keep_accents=_UpperCamelCase )
SCREAMING_SNAKE_CASE_: List[Any] =pickle.dumps(_UpperCamelCase )
pickle.loads(_UpperCamelCase )
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_: Tuple =self.get_tokenizer()
SCREAMING_SNAKE_CASE_: int =self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: List[Any] ="""I was born in 92000, and this is falsé."""
SCREAMING_SNAKE_CASE_: List[Any] =tokenizer.tokenize(_UpperCamelCase )
SCREAMING_SNAKE_CASE_: Tuple =rust_tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
SCREAMING_SNAKE_CASE_: List[Any] =rust_tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE_: str =self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_: Any =tokenizer.encode(_UpperCamelCase )
SCREAMING_SNAKE_CASE_: Any =rust_tokenizer.encode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] ="""Hello World!"""
SCREAMING_SNAKE_CASE_: Tuple =[0, 3_5378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@slow
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =(
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
SCREAMING_SNAKE_CASE_: Optional[int] =[
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_UpperCamelCase , self.big_tokenizer.encode(_UpperCamelCase ) )
@slow
def lowerCamelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple ={"""input_ids""": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 715
|
"""simple docstring"""
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    """Count the Lychrel candidates below ``limit`` (Project Euler problem 55).

    A number is counted when 50 reverse-and-add iterations never produce a palindrome.
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"{solution() = }")
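    # Worked example (illustrative): 47 + 74 = 121 is already a palindrome, so 47 is
    # resolved after a single reverse-and-add step; 196, by contrast, never reaches a
    # palindrome within the 50-iteration cap above and is therefore counted.
    assert is_palindrome(sum_reverse(47))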
| 36
| 0
|
"""simple docstring"""
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_UpperCAmelCase = logging.getLogger()
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Any =argparse.ArgumentParser()
parser.add_argument("""-f""" )
SCREAMING_SNAKE_CASE_: Dict =parser.parse_args()
return args.f
class a ( a__ ):
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =logging.StreamHandler(sys.stdout )
logger.addHandler(_A )
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(_A , """argv""" , _A ):
SCREAMING_SNAKE_CASE_: Any =run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_A , 0.6_6_6 )
@slow
@require_torch_non_multi_gpu
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] ='\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
self.run_and_check(_A )
SCREAMING_SNAKE_CASE_: Optional[int] ='\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(_A )
SCREAMING_SNAKE_CASE_: Any ='\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(_A )
| 716
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""DPTFeatureExtractor"""]
_UpperCAmelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _UpperCAmelCase, module_spec=__spec__)
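# Added note (illustrative, not part of the original module): with the lazy structure above,
# importing the package stays cheap and the heavy submodules are only loaded when one of the
# exported names is first accessed, e.g. (assuming torch and the vision extras are installed):
#
#     from transformers import DPTImageProcessor, DPTForDepthEstimation
#
# Under TYPE_CHECKING the explicit imports above expose the real symbols to static type checkers.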
| 36
| 0
|
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class a ( nn.Module ):
def __init__( self : Any , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int=0.0 , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : str = "geglu" , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : bool = True , lowerCAmelCase : str = "layer_norm" , lowerCAmelCase : bool = False , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: Tuple =only_cross_attention
SCREAMING_SNAKE_CASE_: List[Any] =(num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
SCREAMING_SNAKE_CASE_: Any =(num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
SCREAMING_SNAKE_CASE_: str =AdaLayerNorm(__lowerCamelCase , __lowerCamelCase )
elif self.use_ada_layer_norm_zero:
SCREAMING_SNAKE_CASE_: Union[str, Any] =AdaLayerNormZero(__lowerCamelCase , __lowerCamelCase )
else:
SCREAMING_SNAKE_CASE_: Tuple =nn.LayerNorm(__lowerCamelCase , elementwise_affine=__lowerCamelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =Attention(
query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , dropout=__lowerCamelCase , bias=__lowerCamelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__lowerCamelCase , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
SCREAMING_SNAKE_CASE_: Any =(
AdaLayerNorm(__lowerCamelCase , __lowerCamelCase )
if self.use_ada_layer_norm
else nn.LayerNorm(__lowerCamelCase , elementwise_affine=__lowerCamelCase )
)
SCREAMING_SNAKE_CASE_: int =Attention(
query_dim=__lowerCamelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__lowerCamelCase , dim_head=__lowerCamelCase , dropout=__lowerCamelCase , bias=__lowerCamelCase , upcast_attention=__lowerCamelCase , ) # is self-attn if encoder_hidden_states is none
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =None
SCREAMING_SNAKE_CASE_: str =None
# 3. Feed-forward
SCREAMING_SNAKE_CASE_: Dict =nn.LayerNorm(__lowerCamelCase , elementwise_affine=__lowerCamelCase )
SCREAMING_SNAKE_CASE_: int =FeedForward(__lowerCamelCase , dropout=__lowerCamelCase , activation_fn=__lowerCamelCase , final_dropout=__lowerCamelCase )
# let chunk size default to None
SCREAMING_SNAKE_CASE_: str =None
SCREAMING_SNAKE_CASE_: Any =0
def lowerCamelCase__ ( self : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =chunk_size
SCREAMING_SNAKE_CASE_: Union[str, Any] =dim
def lowerCamelCase__ ( self : Any , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[torch.LongTensor] = None , lowerCAmelCase : Dict[str, Any] = None , lowerCAmelCase : Optional[torch.LongTensor] = None , ) -> str:
'''simple docstring'''
if self.use_ada_layer_norm:
SCREAMING_SNAKE_CASE_: Tuple =self.norma(__lowerCamelCase , __lowerCamelCase )
elif self.use_ada_layer_norm_zero:
SCREAMING_SNAKE_CASE_: Dict =self.norma(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hidden_dtype=hidden_states.dtype )
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.norma(__lowerCamelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =cross_attention_kwargs if cross_attention_kwargs is not None else {}
SCREAMING_SNAKE_CASE_: Dict =self.attna(
__lowerCamelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__lowerCamelCase , **__lowerCamelCase , )
if self.use_ada_layer_norm_zero:
SCREAMING_SNAKE_CASE_: Dict =gate_msa.unsqueeze(1 ) * attn_output
SCREAMING_SNAKE_CASE_: Optional[Any] =attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
SCREAMING_SNAKE_CASE_: Optional[Any] =(
self.norma(__lowerCamelCase , __lowerCamelCase ) if self.use_ada_layer_norm else self.norma(__lowerCamelCase )
)
SCREAMING_SNAKE_CASE_: int =self.attna(
__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE_: List[Any] =attn_output + hidden_states
# 3. Feed-forward
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.norma(__lowerCamelCase )
if self.use_ada_layer_norm_zero:
SCREAMING_SNAKE_CASE_: Dict =norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
SCREAMING_SNAKE_CASE_: str =norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
SCREAMING_SNAKE_CASE_: Dict =torch.cat(
[self.ff(__lowerCamelCase ) for hid_slice in norm_hidden_states.chunk(__lowerCamelCase , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
SCREAMING_SNAKE_CASE_: List[str] =self.ff(__lowerCamelCase )
if self.use_ada_layer_norm_zero:
SCREAMING_SNAKE_CASE_: Optional[Any] =gate_mlp.unsqueeze(1 ) * ff_output
SCREAMING_SNAKE_CASE_: Union[str, Any] =ff_output + hidden_states
return hidden_states
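# Added illustrative sketch (not part of the original file): the chunked feed-forward path above
# relies on the feed-forward network acting independently on each position, so splitting the
# sequence dimension into chunks and concatenating the results matches a single full pass.
# A minimal, self-contained check with a plain position-wise MLP:
def _chunked_feed_forward_sketch() -> None:
    ff = nn.Sequential(nn.Linear(8, 32), nn.GELU(), nn.Linear(32, 8))
    hidden_states = torch.randn(2, 6, 8)  # (batch, seq_len, dim)
    full = ff(hidden_states)
    chunked = torch.cat([ff(chunk) for chunk in hidden_states.chunk(3, dim=1)], dim=1)
    assert torch.allclose(full, chunked, atol=1e-6)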
class a ( nn.Module ):
def __init__( self : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : int = 4 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : str = "geglu" , lowerCAmelCase : bool = False , ) -> Dict:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[str] =int(dim * mult )
SCREAMING_SNAKE_CASE_: Tuple =dim_out if dim_out is not None else dim
if activation_fn == "gelu":
SCREAMING_SNAKE_CASE_: List[str] =GELU(__lowerCamelCase , __lowerCamelCase )
if activation_fn == "gelu-approximate":
SCREAMING_SNAKE_CASE_: str =GELU(__lowerCamelCase , __lowerCamelCase , approximate="""tanh""" )
elif activation_fn == "geglu":
SCREAMING_SNAKE_CASE_: Any =GEGLU(__lowerCamelCase , __lowerCamelCase )
elif activation_fn == "geglu-approximate":
SCREAMING_SNAKE_CASE_: Optional[int] =ApproximateGELU(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE_: Any =nn.ModuleList([] )
# project in
self.net.append(__lowerCamelCase )
# project dropout
self.net.append(nn.Dropout(__lowerCamelCase ) )
# project out
self.net.append(nn.Linear(__lowerCamelCase , __lowerCamelCase ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. has a final dropout
if final_dropout:
self.net.append(nn.Dropout(__lowerCamelCase ) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
for module in self.net:
SCREAMING_SNAKE_CASE_: Optional[int] =module(__lowerCamelCase )
return hidden_states
class a ( nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : str = "none" ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[Any] =nn.Linear(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE_: int =approximate
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : str ) -> List[str]:
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__lowerCamelCase , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.proj(__lowerCamelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =self.gelu(__lowerCamelCase )
return hidden_states
class a ( nn.Module ):
def __init__( self : List[str] , lowerCAmelCase : int , lowerCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: str =nn.Linear(__lowerCamelCase , dim_out * 2 )
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if gate.device.type != "mps":
return F.gelu(__lowerCamelCase )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.proj(__lowerCamelCase ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(__lowerCamelCase )
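# Added illustrative sketch (not part of the original file): GEGLU projects to twice the output
# width, splits the projection in half, and gates one half with GELU of the other,
# i.e. out = x_proj * gelu(gate). A self-contained equivalent with plain tensor ops:
def _geglu_sketch() -> None:
    proj = nn.Linear(8, 2 * 16)
    hidden_states = torch.randn(4, 8)
    x_proj, gate = proj(hidden_states).chunk(2, dim=-1)
    out = x_proj * F.gelu(gate)
    assert out.shape == (4, 16)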
class a ( nn.Module ):
def __init__( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: int =nn.Linear(__lowerCamelCase , __lowerCamelCase )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.proj(__lowerCamelCase )
return x * torch.sigmoid(1.7_0_2 * x )
class a ( nn.Module ):
def __init__( self : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[str] =nn.Embedding(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE_: Dict =nn.SiLU()
SCREAMING_SNAKE_CASE_: Any =nn.Linear(__lowerCamelCase , embedding_dim * 2 )
SCREAMING_SNAKE_CASE_: Optional[Any] =nn.LayerNorm(__lowerCamelCase , elementwise_affine=__lowerCamelCase )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =self.linear(self.silu(self.emb(__lowerCamelCase ) ) )
SCREAMING_SNAKE_CASE_: str =torch.chunk(__lowerCamelCase , 2 )
SCREAMING_SNAKE_CASE_: Optional[int] =self.norm(__lowerCamelCase ) * (1 + scale) + shift
return x
class a ( nn.Module ):
def __init__( self : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: Dict =CombinedTimestepLabelEmbeddings(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE_: Dict =nn.SiLU()
SCREAMING_SNAKE_CASE_: Any =nn.Linear(__lowerCamelCase , 6 * embedding_dim , bias=__lowerCamelCase )
SCREAMING_SNAKE_CASE_: Tuple =nn.LayerNorm(__lowerCamelCase , elementwise_affine=__lowerCamelCase , eps=1E-6 )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Any , lowerCAmelCase : Dict=None ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.linear(self.silu(self.emb(__lowerCamelCase , __lowerCamelCase , hidden_dtype=__lowerCamelCase ) ) )
SCREAMING_SNAKE_CASE_: Optional[Any] =emb.chunk(6 , dim=1 )
SCREAMING_SNAKE_CASE_: int =self.norm(__lowerCamelCase ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class a ( nn.Module ):
def __init__( self : str , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[str] = None , lowerCAmelCase : float = 1E-5 ) -> Dict:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: Dict =num_groups
SCREAMING_SNAKE_CASE_: List[str] =eps
if act_fn is None:
SCREAMING_SNAKE_CASE_: List[str] =None
else:
SCREAMING_SNAKE_CASE_: Optional[int] =get_activation(__lowerCamelCase )
SCREAMING_SNAKE_CASE_: List[Any] =nn.Linear(__lowerCamelCase , out_dim * 2 )
def lowerCamelCase__ ( self : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ) -> str:
'''simple docstring'''
if self.act:
SCREAMING_SNAKE_CASE_: int =self.act(__lowerCamelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =self.linear(__lowerCamelCase )
SCREAMING_SNAKE_CASE_: Any =emb[:, :, None, None]
SCREAMING_SNAKE_CASE_: Union[str, Any] =emb.chunk(2 , dim=1 )
SCREAMING_SNAKE_CASE_: Optional[Any] =F.group_norm(__lowerCamelCase , self.num_groups , eps=self.eps )
SCREAMING_SNAKE_CASE_: Any =x * (1 + scale) + shift
return x
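# Added illustrative sketch (not part of the original file): the conditioned group norm above is
# plain group normalization followed by a per-channel affine predicted from the conditioning
# embedding, out = group_norm(x) * (1 + scale) + shift. Minimal self-contained example:
def _ada_group_norm_sketch() -> None:
    x = torch.randn(2, 8, 4, 4)  # (batch, channels, height, width)
    emb = torch.randn(2, 16)  # conditioning embedding
    to_scale_shift = nn.Linear(16, 2 * 8)  # predicts per-channel scale and shift
    scale, shift = to_scale_shift(emb)[:, :, None, None].chunk(2, dim=1)
    out = F.group_norm(x, num_groups=4) * (1 + scale) + shift
    assert out.shape == x.shape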
| 717
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class a :
def __init__( self : str ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: list[Any] =[]
SCREAMING_SNAKE_CASE_: int =0
SCREAMING_SNAKE_CASE_: int =0
def lowerCamelCase__ ( self : Optional[Any] ) -> bool:
'''simple docstring'''
return self.head == self.tail
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
self.data.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =self.tail + 1
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.data[self.head]
SCREAMING_SNAKE_CASE_: Optional[int] =self.head + 1
return ret
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.tail - self.head
def lowerCamelCase__ ( self : str ) -> None:
'''simple docstring'''
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =data
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: int =1
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return self.data
def lowerCamelCase__ ( self : List[Any] ) -> MyNode | None:
'''simple docstring'''
return self.left
def lowerCamelCase__ ( self : Dict ) -> MyNode | None:
'''simple docstring'''
return self.right
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
return self.height
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =data
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =node
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =node
def lowerCamelCase__ ( self : int , lowerCAmelCase : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =height
def __magic_name__ ( lowercase ):
if node is None:
return 0
return node.get_height()
def __magic_name__ ( lowercase , lowercase ):
if a > b:
return a
return b
def __magic_name__ ( lowercase ):
print("""left rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: int =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
print("""right rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowercase ) )
return right_rotation(lowercase )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowercase ) )
return left_rotation(lowercase )
def __magic_name__ ( lowercase , lowercase ):
if node is None:
return MyNode(lowercase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowercase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
        ): # an imbalance detected
SCREAMING_SNAKE_CASE_: Union[str, Any] =node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
SCREAMING_SNAKE_CASE_: Any =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: List[Any] =lr_rotation(lowercase )
else:
node.set_right(insert_node(node.get_right() , lowercase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
SCREAMING_SNAKE_CASE_: Tuple =node.get_right()
assert right_child is not None
if data < right_child.get_data():
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =left_rotation(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
return node
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: Dict =root.get_right()
if right_child is None:
break
SCREAMING_SNAKE_CASE_: str =right_child
return root.get_data()
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: str =root.get_left()
if left_child is None:
break
SCREAMING_SNAKE_CASE_: Dict =left_child
return root.get_data()
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str =root.get_left()
SCREAMING_SNAKE_CASE_: List[Any] =root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_left_most(lowercase )
root.set_data(lowercase )
root.set_right(del_node(lowercase , lowercase ) )
elif left_child is not None:
SCREAMING_SNAKE_CASE_: Optional[int] =left_child
elif right_child is not None:
SCREAMING_SNAKE_CASE_: Any =right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(lowercase , lowercase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowercase , lowercase ) )
if get_height(lowercase ) - get_height(lowercase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
SCREAMING_SNAKE_CASE_: Tuple =left_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
elif get_height(lowercase ) - get_height(lowercase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
SCREAMING_SNAKE_CASE_: Optional[Any] =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: str =lr_rotation(lowercase )
SCREAMING_SNAKE_CASE_: str =my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowercase )
return root
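# Added, self-contained illustration (it does not reuse the helpers above): rebalancing a
# right-heavy chain promotes the right child to the root. Representing a node as
# (data, left, right), inserting 1, 2, 3 in order produces the chain 1 -> 2 -> 3, and a single
# rotation restores balance with 2 at the root:
def _rebalance_sketch() -> None:
    def promote_right_child(node):
        data, left, right = node
        r_data, r_left, r_right = right
        return (r_data, (data, left, r_left), r_right)

    skewed = (1, None, (2, None, (3, None, None)))
    assert promote_right_child(skewed) == (2, (1, None, None), (3, None, None))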
class a :
def __init__( self : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: MyNode | None =None
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
return get_height(self.root )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""insert:""" + str(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Tuple =insert_node(self.root , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""delete:""" + str(lowerCAmelCase ) )
if self.root is None:
print("""Tree is empty!""" )
return
SCREAMING_SNAKE_CASE_: Union[str, Any] =del_node(self.root , lowerCAmelCase )
    def __str__( self : List[str] , ) -> str: # a level-order traversal gives a more intuitive look at the tree
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =""""""
SCREAMING_SNAKE_CASE_: str =MyQueue()
q.push(self.root )
SCREAMING_SNAKE_CASE_: List[str] =self.get_height()
if layer == 0:
return output
SCREAMING_SNAKE_CASE_: int =0
while not q.is_empty():
SCREAMING_SNAKE_CASE_: int =q.pop()
SCREAMING_SNAKE_CASE_: List[Any] =""" """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(lowerCAmelCase )
q.push(lowerCAmelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
SCREAMING_SNAKE_CASE_: List[Any] =cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , lowerCAmelCase ) - 1:
SCREAMING_SNAKE_CASE_: int =layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __magic_name__ ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
_UpperCAmelCase = AVLtree()
_UpperCAmelCase = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 36
| 0
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
] )
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : List[str] ) -> int:
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=lowerCamelCase_ , )
assert hasattr(self , """env""" )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Any=1 ) -> Optional[int]:
'''simple docstring'''
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-single''' , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , )
def lowerCamelCase__ ( self : str , lowerCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
TrainingJobAnalytics(lowerCamelCase_ ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.create_estimator()
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE_: Optional[int] =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE_: int =list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
SCREAMING_SNAKE_CASE_: Dict =list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE_: Any =(
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase_ )
| 718
|
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    output = """"""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
return output
def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence )
def benchmark() -> None:
    from timeit import timeit
    print("""Running performance benchmarks...""" )
    setup = """from string import printable ; from __main__ import atbash, atbash_slow"""
    print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=setup )} seconds''' )
    print(f'''> atbash(): {timeit("atbash(printable)" , setup=setup )} seconds''' )
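# Added usage note: Atbash maps A<->Z, B<->Y, ... so applying it twice returns the original text,
# and both implementations above agree on every character. A small self-check (not run automatically):
def _atbash_self_check() -> None:
    sample = "Attack at dawn 123!"
    assert atbash(sample) == atbash_slow(sample)
    assert atbash(atbash(sample)) == sample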
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 36
| 0
|
from __future__ import annotations
def fractional_knapsack(value: list, weight: list, capacity: float) -> tuple:
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i: ratio[i] , reverse=True )
    max_value = 0.0
    fractions = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
return max_value, fractions
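# Added usage example for the function above: with values [60, 100, 120], weights [10, 20, 30]
# and capacity 50, the greedy choice takes the first two items whole and 2/3 of the third,
# for a total value of 240 (not run automatically):
def _fractional_knapsack_example() -> None:
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert fractions[:2] == [1, 1] and abs(fractions[2] - 2 / 3) < 1e-9
    assert abs(max_value - 240.0) < 1e-9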
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=3 , lowerCAmelCase : Optional[Any]=64 , lowerCAmelCase : Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =np.random.default_rng(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =length
SCREAMING_SNAKE_CASE_: Union[str, Any] =rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_: Tuple =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ) -> str:
'''simple docstring'''
return self.length
def __getitem__( self : Union[str, Any] , lowerCAmelCase : Any ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : str=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Optional[int]=False ) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: Dict =True
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tuple=None ) -> int:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
def __init__( self : Union[str, Any] , lowerCAmelCase : Any=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : List[Any]=False ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[str] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: List[Any] =True
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : int=None ) -> Any:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Optional[int] =False
return x * self.a + self.b
def __magic_name__ ( lowercase , lowercase = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE_: Optional[int] ={"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
SCREAMING_SNAKE_CASE_: Any =load_dataset("""csv""" , data_files=lowercase )
SCREAMING_SNAKE_CASE_: Any =datasets["""train"""].unique("""label""" )
SCREAMING_SNAKE_CASE_: List[Any] ={v: i for i, v in enumerate(lowercase )}
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Dict =tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase , padding="""max_length""" )
if "label" in examples:
SCREAMING_SNAKE_CASE_: Optional[int] =[label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_: List[Any] =datasets.map(
lowercase , batched=lowercase , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(tokenized_datasets["""train"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=2 )
SCREAMING_SNAKE_CASE_: Dict =DataLoader(tokenized_datasets["""validation"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=1 )
return train_dataloader, eval_dataloader
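# Added illustration (hypothetical helper, not part of the original script): the collate function
# above pads to a fixed length on TPU so every batch has the same static shape, and pads to the
# longest sequence elsewhere to avoid wasted computation. A framework-free sketch of the two strategies:
def _padding_sketch() -> None:
    batch = [[101, 7, 9, 102], [101, 3, 102]]
    pad_id = 0
    static = [seq + [pad_id] * (8 - len(seq)) for seq in batch]  # pad to max_length=8
    longest = max(len(seq) for seq in batch)
    dynamic = [seq + [pad_id] * (longest - len(seq)) for seq in batch]  # pad to longest
    assert {len(s) for s in static} == {8} and {len(s) for s in dynamic} == {4}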
| 36
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_UpperCAmelCase = logging.get_logger(__name__)
def make_batched(videos):
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
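# Added usage sketch (not run automatically): make_batched normalizes accepted inputs to a list of
# videos, where each video is a list of frames. A single frame becomes one video with one frame,
# a flat list of frames becomes one video, and an already nested list is returned unchanged:
def _make_batched_sketch() -> None:
    frame = np.zeros((224, 224, 3), dtype=np.uint8)
    assert make_batched(frame)[0][0] is frame  # single image -> [[frame]]
    assert make_batched([frame, frame])[0][1] is frame  # list of frames -> one video
    assert make_batched([[frame, frame]])[0][0] is frame  # already batched -> unchanged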
class a ( BaseImageProcessor ):
UpperCamelCase : Tuple = ["""pixel_values"""]
def __init__( self : int , lowerCAmelCase : str = True , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Dict = PILImageResampling.BILINEAR , lowerCAmelCase : List[Any] = True , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Tuple = True , lowerCAmelCase : List[str] = 1 / 255 , lowerCAmelCase : Optional[Any] = True , lowerCAmelCase : int = True , lowerCAmelCase : List[Any] = None , lowerCAmelCase : int = None , **lowerCAmelCase : Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**snake_case_ )
SCREAMING_SNAKE_CASE_: Dict =size if size is not None else {"""shortest_edge""": 256}
SCREAMING_SNAKE_CASE_: Tuple =get_size_dict(snake_case_ , default_to_square=snake_case_ )
SCREAMING_SNAKE_CASE_: List[Any] =crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
SCREAMING_SNAKE_CASE_: str =get_size_dict(snake_case_ , param_name="""crop_size""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =do_resize
SCREAMING_SNAKE_CASE_: Dict =size
SCREAMING_SNAKE_CASE_: Optional[int] =do_center_crop
SCREAMING_SNAKE_CASE_: str =crop_size
SCREAMING_SNAKE_CASE_: Any =resample
SCREAMING_SNAKE_CASE_: Optional[Any] =do_rescale
SCREAMING_SNAKE_CASE_: str =rescale_factor
SCREAMING_SNAKE_CASE_: Any =offset
SCREAMING_SNAKE_CASE_: Optional[int] =do_normalize
SCREAMING_SNAKE_CASE_: Optional[int] =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self : int , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple = PILImageResampling.BILINEAR , lowerCAmelCase : int = None , **lowerCAmelCase : Dict , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE_: Optional[Any] =get_resize_output_image_size(snake_case_ , size["""shortest_edge"""] , default_to_square=snake_case_ )
elif "height" in size and "width" in size:
SCREAMING_SNAKE_CASE_: Any =(size["""height"""], size["""width"""])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCamelCase__ ( self : int , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int = None , **lowerCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(snake_case_ , size=(size["""height"""], size["""width"""]) , data_format=snake_case_ , **snake_case_ )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : Any = True , lowerCAmelCase : str = None , **lowerCAmelCase : List[Any] , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =image.astype(np.floataa )
if offset:
SCREAMING_SNAKE_CASE_: Dict =image - (scale / 2)
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] = None , **lowerCAmelCase : Any , ) -> np.ndarray:
'''simple docstring'''
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Dict , lowerCAmelCase : Tuple = None , lowerCAmelCase : str = None , lowerCAmelCase : Optional[Any] = None , lowerCAmelCase : Tuple = None , lowerCAmelCase : Any = None , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : List[str] = None , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Tuple = None , lowerCAmelCase : List[Any] = None , lowerCAmelCase : List[Any] = None , lowerCAmelCase : int = ChannelDimension.FIRST , ) -> np.ndarray:
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
if offset and not do_rescale:
raise ValueError("""For offset, do_rescale must also be set to True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_: List[Any] =to_numpy_array(snake_case_ )
if do_resize:
SCREAMING_SNAKE_CASE_: Optional[int] =self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ )
if do_center_crop:
SCREAMING_SNAKE_CASE_: Dict =self.center_crop(snake_case_ , size=snake_case_ )
if do_rescale:
SCREAMING_SNAKE_CASE_: Optional[Any] =self.rescale(image=snake_case_ , scale=snake_case_ , offset=snake_case_ )
if do_normalize:
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ )
SCREAMING_SNAKE_CASE_: str =to_channel_dimension_format(snake_case_ , snake_case_ )
return image
def lowerCamelCase__ ( self : int , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any = None , lowerCAmelCase : str = None , lowerCAmelCase : Any = None , lowerCAmelCase : str = None , lowerCAmelCase : Any = None , lowerCAmelCase : Optional[Any] = None , lowerCAmelCase : List[str] = None , lowerCAmelCase : Dict = None , lowerCAmelCase : Optional[Any] = None , lowerCAmelCase : List[str] = None , lowerCAmelCase : Any = None , lowerCAmelCase : str = None , lowerCAmelCase : Tuple = ChannelDimension.FIRST , **lowerCAmelCase : List[str] , ) -> PIL.Image.Image:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_: List[Any] =resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_: int =do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_: List[str] =do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_: Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_: Optional[int] =offset if offset is not None else self.offset
SCREAMING_SNAKE_CASE_: Optional[Any] =do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_: Optional[Any] =image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_: Optional[Any] =size if size is not None else self.size
SCREAMING_SNAKE_CASE_: List[Any] =get_size_dict(snake_case_ , default_to_square=snake_case_ )
SCREAMING_SNAKE_CASE_: Any =crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_: int =get_size_dict(snake_case_ , param_name="""crop_size""" )
if not valid_images(snake_case_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
SCREAMING_SNAKE_CASE_: List[Any] =make_batched(snake_case_ )
SCREAMING_SNAKE_CASE_: Optional[Any] =[
[
self._preprocess_image(
image=snake_case_ , do_resize=snake_case_ , size=snake_case_ , resample=snake_case_ , do_center_crop=snake_case_ , crop_size=snake_case_ , do_rescale=snake_case_ , rescale_factor=snake_case_ , offset=snake_case_ , do_normalize=snake_case_ , image_mean=snake_case_ , image_std=snake_case_ , data_format=snake_case_ , )
for img in video
]
for video in videos
]
SCREAMING_SNAKE_CASE_: Dict ={"""pixel_values""": videos}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
| 720
|
"""simple docstring"""
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
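# Added example: the first Catalan numbers are 1, 1, 2, 5, 14, 42, so catalan_numbers(5)
# returns [1, 1, 2, 5, 14, 42] (not run automatically):
def _catalan_example() -> None:
    assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]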
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
_UpperCAmelCase = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 36
| 0
|
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase=True ):
model.train()
SCREAMING_SNAKE_CASE_: str =model(__UpperCamelCase )
SCREAMING_SNAKE_CASE_: Tuple =F.mse_loss(__UpperCamelCase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(__UpperCamelCase )
def __magic_name__ ( lowercase , lowercase=False ):
set_seed(42 )
SCREAMING_SNAKE_CASE_: int =RegressionModel()
SCREAMING_SNAKE_CASE_: Dict =deepcopy(__UpperCamelCase )
SCREAMING_SNAKE_CASE_: List[str] =RegressionDataset(length=80 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =DataLoader(__UpperCamelCase , batch_size=16 )
model.to(accelerator.device )
if sched:
SCREAMING_SNAKE_CASE_: Any =AdamW(params=model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_: str =AdamW(params=ddp_model.parameters() , lr=1e-3 )
SCREAMING_SNAKE_CASE_: Tuple =LambdaLR(__UpperCamelCase , lr_lambda=lambda lowercase : epoch**0.65 )
SCREAMING_SNAKE_CASE_: Optional[Any] =LambdaLR(__UpperCamelCase , lr_lambda=lambda lowercase : epoch**0.65 )
# Make a copy of `model`
if sched:
SCREAMING_SNAKE_CASE_: List[Any] =accelerator.prepare(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
SCREAMING_SNAKE_CASE_: str =accelerator.prepare(__UpperCamelCase , __UpperCamelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
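# Added illustrative sketch (not part of the original test suite): the pattern exercised below is
# that `accelerator.accumulate(model)` suppresses gradient synchronization until
# `gradient_accumulation_steps` micro-batches have been processed. Minimal usage, assuming the
# model, optimizer and dataloader were already passed through `accelerator.prepare`:
def _accumulate_usage_sketch(accelerator, model, optimizer, dataloader) -> None:
    for batch in dataloader:
        with accelerator.accumulate(model):
            output = model(batch["x"])
            loss = F.mse_loss(output, batch["y"].to(output.device))
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()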
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =get_training_setup(__UpperCamelCase )
# Use a single batch
SCREAMING_SNAKE_CASE_: List[Any] =next(iter(__UpperCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE_: List[Any] =accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE_: Dict =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__UpperCamelCase ):
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
# Sync grads
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ddp_input[torch.randperm(len(__UpperCamelCase ) )]
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =get_training_setup(__UpperCamelCase )
# Use a single batch
SCREAMING_SNAKE_CASE_: str =next(iter(__UpperCamelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE_: Optional[Any] =accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE_: Optional[Any] =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__UpperCamelCase ):
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
# Sync grads
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ddp_input[torch.randperm(len(__UpperCamelCase ) )]
def __magic_name__ ( lowercase=False , lowercase=False ):
SCREAMING_SNAKE_CASE_: List[Any] =Accelerator(
split_batches=__UpperCamelCase , dispatch_batches=__UpperCamelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE_: List[str] =get_training_setup(__UpperCamelCase )
for iteration, batch in enumerate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_: int =batch.values()
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE_: Any =accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE_: int =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__UpperCamelCase ):
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__UpperCamelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
SCREAMING_SNAKE_CASE_: str =ddp_input[torch.randperm(len(__UpperCamelCase ) )]
GradientState._reset_state()
def __magic_name__ ( lowercase=False , lowercase=False ):
SCREAMING_SNAKE_CASE_: Dict =Accelerator(
split_batches=__UpperCamelCase , dispatch_batches=__UpperCamelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE_: List[str] =get_training_setup(__UpperCamelCase , __UpperCamelCase )
for iteration, batch in enumerate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE_: List[str] =batch.values()
# Gather the distributed inputs and targs for the base model
SCREAMING_SNAKE_CASE_: str =accelerator.gather((ddp_input, ddp_target) )
SCREAMING_SNAKE_CASE_: Any =input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__UpperCamelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__UpperCamelCase ):
step_model(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
SCREAMING_SNAKE_CASE_: Tuple =(((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__UpperCamelCase ))
if accelerator.num_processes > 1:
check_model_parameters(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: int =Accelerator()
SCREAMING_SNAKE_CASE_: Tuple =RegressionDataset(length=80 )
SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(__UpperCamelCase , batch_size=16 )
SCREAMING_SNAKE_CASE_: Any =RegressionDataset(length=96 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =DataLoader(__UpperCamelCase , batch_size=16 )
SCREAMING_SNAKE_CASE_: Optional[Any] =accelerator.prepare(__UpperCamelCase , __UpperCamelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__UpperCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__UpperCamelCase )
if iteration < len(__UpperCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__UpperCamelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__UpperCamelCase )
if batch_num < len(__UpperCamelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Any =Accelerator()
SCREAMING_SNAKE_CASE_: Optional[int] =accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(__UpperCamelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(__UpperCamelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(__UpperCamelCase , __UpperCamelCase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(__UpperCamelCase , __UpperCamelCase )
def __magic_name__ ( lowercase ):
main()
if __name__ == "__main__":
main()
| 721
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_UpperCAmelCase = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class a ( PretrainedConfig ):
UpperCamelCase : Any = 'albert'
    def __init__( self , vocab_size=30000 , embedding_size=128 , hidden_size=4096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=16384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
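# A minimal usage sketch (hypothetical values, not part of the original module):
# >>> config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
# >>> config.hidden_size
# 768
# >>> AlbertOnnxConfig(config, task="default").inputs["input_ids"]
# {0: 'batch', 1: 'sequence'}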
| 36
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest( unittest.TestCase ):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest( unittest.TestCase ):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [1_0.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1E-7},
[0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4],
),
}
for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler"
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    """Wraps a scheduler's lr lambdas so that the schedule stays picklable."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
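# A minimal usage sketch (illustrative values only), mirroring the test data above:
# >>> model = nn.Linear(50, 50)
# >>> optimizer = AdamW(model.parameters(), lr=10.0)
# >>> scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
# >>> unwrap_schedule(scheduler, num_steps=3)  # the learning rate warms up linearly from 0.0 to 10.0
# [0.0, 5.0, 10.0]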
| 700
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata
@classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from its dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()
    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)
    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads and validates the dataset metadata from a YAML string."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)
    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
_UpperCAmelCase = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 36
| 0
|
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
_UpperCAmelCase : Optional[int] = """will be""" if year > datetime.now().year else """was"""
print(f"""Easter in {year} {tense} {gauss_easter(year)}""")
| 701
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset into features and target labels.
    return (data["data"], data["target"])


def xgboost(features, target) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
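# A minimal usage sketch (assumed iris dataset shapes, for illustration only):
# >>> features, targets = data_handling(load_iris())
# >>> features.shape, targets.shape
# ((150, 4), (150,))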
| 36
| 0
|
"""simple docstring"""
_UpperCAmelCase = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
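# A minimal helper sketch (hypothetical, not part of the original table): turning pinned
# entries into pip-style requirement strings for a chosen subset of packages.
def _requirements_for(*packages):
    # Look up each package key in the table above and return its pinned specifier string.
    return [_UpperCAmelCase[package] for package in packages]


# Example (illustrative): _requirements_for("torch", "numpy")
# -> ["torch>=1.9,!=1.12.0", "numpy>=1.17"]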
| 702
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atomaa_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
SCREAMING_SNAKE_CASE_: List[str] =[]
SCREAMING_SNAKE_CASE_: Any =[]
for rt in rc.restypes:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
SCREAMING_SNAKE_CASE_: Any ={name: i for i, name in enumerate(lowercase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor(
lowercase , dtype=torch.floataa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Any =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: Tuple =residx_atomaa_mask
SCREAMING_SNAKE_CASE_: Dict =residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
SCREAMING_SNAKE_CASE_: Dict =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Optional[int] =residx_atomaa_to_atomaa.long()
# create the corresponding mask
SCREAMING_SNAKE_CASE_: Optional[int] =torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
SCREAMING_SNAKE_CASE_: int =rc.restype_atoa[restype_letter]
SCREAMING_SNAKE_CASE_: Any =rc.residue_atoms[restype_name]
for atom_name in atom_names:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.atom_order[atom_name]
SCREAMING_SNAKE_CASE_: Dict =1
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: List[Any] =residx_atomaa_mask
return protein
def make_atomaa_masks_np(batch):
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atomaa_masks(batch))
    return out
| 36
| 0
|
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class a ( TestCasePlus ):
@slow
@require_torch
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =BertTokenizer.from_pretrained("""bert-base-uncased""" )
SCREAMING_SNAKE_CASE_: Any =bertabert.config.encoder.vocab_size
SCREAMING_SNAKE_CASE_: List[str] =tokenizer.sep_token_id
SCREAMING_SNAKE_CASE_: Any =tokenizer.cls_token_id
SCREAMING_SNAKE_CASE_: Tuple =128
SCREAMING_SNAKE_CASE_: str =datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
SCREAMING_SNAKE_CASE_: List[str] =datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
SCREAMING_SNAKE_CASE_: Optional[int] =train_dataset.select(range(32 ) )
SCREAMING_SNAKE_CASE_: Tuple =val_dataset.select(range(16 ) )
SCREAMING_SNAKE_CASE_: List[str] =4
def _map_to_encoder_decoder_inputs(lowerCAmelCase : Optional[int] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
SCREAMING_SNAKE_CASE_: Dict =tokenizer(batch["""article"""] , padding="""max_length""" , truncation=__A , max_length=512 )
SCREAMING_SNAKE_CASE_: List[str] =tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=__A , max_length=128 )
SCREAMING_SNAKE_CASE_: str =inputs.input_ids
SCREAMING_SNAKE_CASE_: int =inputs.attention_mask
SCREAMING_SNAKE_CASE_: Tuple =outputs.input_ids
SCREAMING_SNAKE_CASE_: Dict =outputs.input_ids.copy()
SCREAMING_SNAKE_CASE_: Optional[Any] =[
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
SCREAMING_SNAKE_CASE_: List[Any] =outputs.attention_mask
assert all(len(__A ) == 512 for x in inputs.input_ids )
assert all(len(__A ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(lowerCAmelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE_: Dict =pred.label_ids
SCREAMING_SNAKE_CASE_: Any =pred.predictions
# all unnecessary tokens are removed
SCREAMING_SNAKE_CASE_: str =tokenizer.batch_decode(__A , skip_special_tokens=__A )
SCREAMING_SNAKE_CASE_: Any =tokenizer.batch_decode(__A , skip_special_tokens=__A )
SCREAMING_SNAKE_CASE_: str =sum([int(pred_str[i] == label_str[i] ) for i in range(len(__A ) )] ) / len(__A )
return {"accuracy": accuracy}
# map train dataset
SCREAMING_SNAKE_CASE_: Tuple =train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__A , batch_size=__A , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
SCREAMING_SNAKE_CASE_: int =val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__A , batch_size=__A , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
SCREAMING_SNAKE_CASE_: Dict =self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_: List[str] =SeqaSeqTrainingArguments(
output_dir=__A , per_device_train_batch_size=__A , per_device_eval_batch_size=__A , predict_with_generate=__A , evaluation_strategy="""steps""" , do_train=__A , do_eval=__A , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
SCREAMING_SNAKE_CASE_: Dict =SeqaSeqTrainer(
model=__A , args=__A , compute_metrics=_compute_metrics , train_dataset=__A , eval_dataset=__A , tokenizer=__A , )
# start training
trainer.train()
| 703
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs
def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class a :
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
        outputs = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 36
| 0
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(_lowerCamelCase , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 704
|
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 36
| 0
|
"""simple docstring"""
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
_UpperCAmelCase = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
_UpperCAmelCase = requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(1_0_0_0_0):
out_file.write(data)
_UpperCAmelCase = BeautifulSoup(res.text, """html.parser""")
_UpperCAmelCase = list(soup.select(""".eZt8xd"""))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 705
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
def __init__( self : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str]=13 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=224 , lowerCAmelCase : List[str]=1000 , lowerCAmelCase : Optional[Any]=[3, 3, 6, 4] , lowerCAmelCase : int=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =parent
SCREAMING_SNAKE_CASE_: Any =batch_size
SCREAMING_SNAKE_CASE_: Tuple =num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] =is_training
SCREAMING_SNAKE_CASE_: Tuple =use_labels
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =num_labels
SCREAMING_SNAKE_CASE_: int =image_size
SCREAMING_SNAKE_CASE_: Optional[Any] =layer_depths
SCREAMING_SNAKE_CASE_: List[Any] =embed_dims
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_: Tuple =self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1E-5 , )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Any =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.num_labels
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE_: int =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SwiftFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
UpperCamelCase : Any = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Dict = False
UpperCamelCase : List[str] = False
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Any =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Tuple =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[Any] =SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_: Optional[Any] =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[str] =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Dict =outputs.hidden_states
SCREAMING_SNAKE_CASE_: List[Any] =8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Any =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
def _config_zero_init(lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_: Dict =copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1E-10 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =_config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: List[Any] =_config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: int =prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 36
| 0
|
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Converts each letter of a lowercase string to its position in the alphabet."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Converts alphabet positions back into the corresponding lowercase string."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
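# Worked example: encode("hello") -> [8, 5, 12, 12, 15] and decode([8, 5, 12, 12, 15]) -> "hello".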
| 706
|
"""simple docstring"""
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
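# Worked example: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi, about 15.70796.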
| 36
| 0
|
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class a ( unittest.TestCase ):
@parameterized.expand([(None,), ("""foo.json""",)] )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase , config_name=__lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =GenerationConfig.from_pretrained(__lowerCAmelCase , config_name=__lowerCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __lowerCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , __lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =AutoConfig.from_pretrained("""gpt2""" )
SCREAMING_SNAKE_CASE_: str =GenerationConfig.from_model_config(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def lowerCamelCase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =GenerationConfig()
SCREAMING_SNAKE_CASE_: str ={
"""max_new_tokens""": 1024,
"""foo""": """bar""",
}
SCREAMING_SNAKE_CASE_: Optional[Any] =copy.deepcopy(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =generation_config.update(**__lowerCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCAmelCase , {"""foo""": """bar"""} )
def lowerCamelCase__ ( self : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =GenerationConfig()
SCREAMING_SNAKE_CASE_: str ="""bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =GenerationConfig.from_pretrained(__lowerCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
SCREAMING_SNAKE_CASE_: Union[str, Any] =GenerationConfig.from_model_config(__lowerCAmelCase )
assert not hasattr(__lowerCAmelCase , """foo""" ) # no new kwargs should be initialized if from config
def lowerCamelCase__ ( self : List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , __lowerCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
SCREAMING_SNAKE_CASE_: Optional[Any] =GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , __lowerCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =GenerationConfig.from_pretrained(__lowerCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , __lowerCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class a ( unittest.TestCase ):
@classmethod
def lowerCamelCase__ ( cls : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =TOKEN
HfFolder.save_token(__lowerCAmelCase )
@classmethod
def lowerCamelCase__ ( cls : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def lowerCamelCase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_: Union[str, Any] =GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCAmelCase , repo_id="""test-generation-config""" , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_: Tuple =GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
def lowerCamelCase__ ( self : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_: Dict =GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCAmelCase , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_: Optional[int] =GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
| 707
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Any =jax.device_count()
SCREAMING_SNAKE_CASE_: Dict =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Dict =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int ="""stabilityai/stable-diffusion-2"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Optional[int] =scheduler_params
SCREAMING_SNAKE_CASE_: Tuple ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.device_count()
SCREAMING_SNAKE_CASE_: Optional[Any] =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Any =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: str =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Any =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 36
| 0
|
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_UpperCAmelCase = logging.get_logger(__name__)
# General docstring
_UpperCAmelCase = "RegNetConfig"
# Base docstring
_UpperCAmelCase = "facebook/regnet-y-040"
_UpperCAmelCase = [1, 1_0_8_8, 7, 7]
# Image classification docstring
_UpperCAmelCase = "facebook/regnet-y-040"
_UpperCAmelCase = "tabby, tabby cat"
_UpperCAmelCase = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class a ( nn.Module ):
def __init__( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 3 , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 1 , lowerCAmelCase : Optional[str] = "relu" , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[Any] =nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
SCREAMING_SNAKE_CASE_: str =nn.BatchNormad(lowercase_ )
SCREAMING_SNAKE_CASE_: Optional[int] =ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.convolution(lowercase_ )
SCREAMING_SNAKE_CASE_: str =self.normalization(lowercase_ )
SCREAMING_SNAKE_CASE_: Optional[Any] =self.activation(lowercase_ )
return hidden_state
class a ( nn.Module ):
def __init__( self : List[Any] , lowerCAmelCase : RegNetConfig ) -> Optional[int]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: str =RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
SCREAMING_SNAKE_CASE_: Any =config.num_channels
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
SCREAMING_SNAKE_CASE_: Any =self.embedder(lowercase_ )
return hidden_state
class a ( nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 2 ) -> List[Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: Optional[Any] =nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =nn.BatchNormad(lowercase_ )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Tensor ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.convolution(lowercase_ )
SCREAMING_SNAKE_CASE_: str =self.normalization(lowercase_ )
return hidden_state
class a ( nn.Module ):
def __init__( self : str , lowerCAmelCase : int , lowerCAmelCase : int ) -> Any:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: int =nn.AdaptiveAvgPoolad((1, 1) )
SCREAMING_SNAKE_CASE_: int =nn.Sequential(
nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.pooler(lowercase_ )
SCREAMING_SNAKE_CASE_: Optional[int] =self.attention(lowercase_ )
SCREAMING_SNAKE_CASE_: Any =hidden_state * attention
return hidden_state
class a ( nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : RegNetConfig , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 1 ) -> Dict:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[Any] =in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE_: Optional[int] =max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE_: Dict =(
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
SCREAMING_SNAKE_CASE_: List[Any] =nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
SCREAMING_SNAKE_CASE_: int =ACTaFN[config.hidden_act]
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =hidden_state
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.layer(lowercase_ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.shortcut(lowercase_ )
hidden_state += residual
SCREAMING_SNAKE_CASE_: str =self.activation(lowercase_ )
return hidden_state
class a ( nn.Module ):
def __init__( self : Optional[Any] , lowerCAmelCase : RegNetConfig , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 1 ) -> Dict:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: str =in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE_: int =max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE_: int =(
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
SCREAMING_SNAKE_CASE_: Union[str, Any] =nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
SCREAMING_SNAKE_CASE_: Optional[int] =ACTaFN[config.hidden_act]
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_state
SCREAMING_SNAKE_CASE_: str =self.layer(lowercase_ )
SCREAMING_SNAKE_CASE_: int =self.shortcut(lowercase_ )
hidden_state += residual
SCREAMING_SNAKE_CASE_: Optional[Any] =self.activation(lowercase_ )
return hidden_state
class a ( nn.Module ):
def __init__( self : str , lowerCAmelCase : RegNetConfig , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int = 2 , lowerCAmelCase : int = 2 , ) -> List[str]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: str =RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
SCREAMING_SNAKE_CASE_: str =nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.layers(lowercase_ )
return hidden_state
class a ( nn.Module ):
def __init__( self : Dict , lowerCAmelCase : RegNetConfig ) -> int:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: Optional[Any] =nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
SCREAMING_SNAKE_CASE_: Optional[Any] =zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tensor , lowerCAmelCase : bool = False , lowerCAmelCase : bool = True ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =() if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE_: Union[str, Any] =hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE_: Dict =stage_module(lowercase_ )
if output_hidden_states:
SCREAMING_SNAKE_CASE_: Optional[Any] =hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class a ( _UpperCAmelCase ):
UpperCamelCase : List[str] = RegNetConfig
UpperCamelCase : Any = 'regnet'
UpperCamelCase : List[str] = 'pixel_values'
UpperCamelCase : str = True
def lowerCamelCase__ ( self : int , lowerCAmelCase : Optional[Any] ) -> List[str]:
'''simple docstring'''
if isinstance(lowercase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any=False ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(lowercase_ , lowercase_ ):
SCREAMING_SNAKE_CASE_: List[str] =value
_UpperCAmelCase = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_UpperCAmelCase = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , _UpperCAmelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class a ( _UpperCAmelCase ):
def __init__( self : Any , lowerCAmelCase : Any ) -> int:
'''simple docstring'''
super().__init__(lowercase_ )
SCREAMING_SNAKE_CASE_: List[str] =config
SCREAMING_SNAKE_CASE_: Union[str, Any] =RegNetEmbeddings(lowercase_ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =RegNetEncoder(lowercase_ )
SCREAMING_SNAKE_CASE_: str =nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tensor , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[bool] = None ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_: Tuple =return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_: str =self.embedder(lowercase_ )
SCREAMING_SNAKE_CASE_: Optional[Any] =self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
SCREAMING_SNAKE_CASE_: List[Any] =encoder_outputs[0]
SCREAMING_SNAKE_CASE_: str =self.pooler(lowercase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , _UpperCAmelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class a ( _UpperCAmelCase ):
def __init__( self : Dict , lowerCAmelCase : str ) -> Dict:
'''simple docstring'''
super().__init__(lowercase_ )
SCREAMING_SNAKE_CASE_: Any =config.num_labels
SCREAMING_SNAKE_CASE_: List[str] =RegNetModel(lowercase_ )
# classification head
SCREAMING_SNAKE_CASE_: Any =nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[torch.LongTensor] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[bool] = None , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_: Optional[int] =self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
SCREAMING_SNAKE_CASE_: Optional[int] =outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE_: List[Any] =self.classifier(lowercase_ )
SCREAMING_SNAKE_CASE_: Optional[int] =None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_: Optional[int] ="""regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE_: str ="""single_label_classification"""
else:
SCREAMING_SNAKE_CASE_: str ="""multi_label_classification"""
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE_: str =MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE_: List[Any] =loss_fct(logits.squeeze() , labels.squeeze() )
else:
SCREAMING_SNAKE_CASE_: List[str] =loss_fct(lowercase_ , lowercase_ )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE_: Optional[int] =CrossEntropyLoss()
SCREAMING_SNAKE_CASE_: Union[str, Any] =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE_: Dict =BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE_: Tuple =loss_fct(lowercase_ , lowercase_ )
if not return_dict:
SCREAMING_SNAKE_CASE_: Tuple =(logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
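# Illustrative usage sketch (not part of the modeling file): the checkpoint and the `image`
# variable below are placeholders, and the public class names are assumed to be the standard
# `transformers` exports that this RegNet file corresponds to.
#
#     from transformers import AutoImageProcessor, RegNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="pt")  # `image` is a PIL image supplied by the caller
#     predicted_label = model(**inputs).logits.argmax(-1).item()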
| 708
|
"""simple docstring"""
def solution( lowercase = 200_0000 ):
    # Sieve of Eratosthenes: 0 marks a prime candidate, 1 marks a composite number.
    primality_list = [0 for i in range(lowercase + 1 )]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2 , int(lowercase**0.5 ) + 1 ):
        if primality_list[i] == 0:
            for j in range(i * i , lowercase + 1 , i ):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(lowercase ):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
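# Quick sanity check (illustrative only): for a limit of 10 the primes below it are
# 2, 3, 5 and 7, so `solution(10)` should return 17.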
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "roberta":
_UpperCAmelCase = RobertaForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = '''roberta'''
elif args.model_type == "gpt2":
_UpperCAmelCase = GPTaLMHeadModel.from_pretrained(args.model_name)
_UpperCAmelCase = '''transformer'''
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_UpperCAmelCase = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_UpperCAmelCase = f"""{prefix}.embeddings.{w}.weight"""
_UpperCAmelCase = state_dict[param_name]
for w in ["weight", "bias"]:
_UpperCAmelCase = f"""{prefix}.embeddings.LayerNorm.{w}"""
_UpperCAmelCase = state_dict[param_name]
# Transformer Blocks #
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
_UpperCAmelCase = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
    # Language Modeling Head #
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_UpperCAmelCase = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""lm_head.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.ln_f.{w}"""]
_UpperCAmelCase = state_dict['''lm_head.weight''']
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
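# Example invocation (illustrative; the script file name and the output path are placeholders,
# the flags are the ones defined by the argument parser above):
#
#     python extract.py --model_type roberta --model_name roberta-large \
#         --dump_checkpoint serialization_dir/tf_roberta.pth --vocab_transform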
| 709
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 36
| 0
|
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
_UpperCAmelCase = logging.getLogger(__name__)
class a ( UpperCAmelCase__ ):
def __init__( self : Any , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Tuple=None ) -> Optional[int]:
'''simple docstring'''
super().__init__(
__lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , index=__lowerCAmelCase , init_retrieval=__lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: List[str] =None
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
SCREAMING_SNAKE_CASE_: List[str] =self._infer_socket_ifname()
# avoid clash with the NCCL port
SCREAMING_SNAKE_CASE_: Optional[int] =str(distributed_port + 1 )
SCREAMING_SNAKE_CASE_: Any =dist.new_group(ranks=__lowerCAmelCase , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return dist.get_rank(group=self.process_group ) == 0
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : Tuple=torch.floataa ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =torch.empty(__lowerCAmelCase , dtype=__lowerCAmelCase )
dist.scatter(__lowerCAmelCase , src=0 , scatter_list=__lowerCAmelCase , group=self.process_group )
return target_tensor
def lowerCamelCase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
SCREAMING_SNAKE_CASE_: str =next((addr for addr in addrs if addr.startswith("""e""" )) , __lowerCAmelCase )
return ifname
def lowerCamelCase__ ( self : str , lowerCAmelCase : np.ndarray , lowerCAmelCase : int ) -> Tuple[np.ndarray, List[dict]]:
'''simple docstring'''
if not dist.is_initialized():
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self._main_retrieve(__lowerCAmelCase , __lowerCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCAmelCase )
# distributed training
SCREAMING_SNAKE_CASE_: Any =dist.get_world_size(group=self.process_group )
# gather logic
SCREAMING_SNAKE_CASE_: List[str] =None
if self._is_main():
SCREAMING_SNAKE_CASE_: Dict =[torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__lowerCAmelCase )]
dist.gather(torch.tensor(__lowerCAmelCase ) , dst=0 , gather_list=__lowerCAmelCase , group=self.process_group )
# scatter logic
SCREAMING_SNAKE_CASE_: int =question_hidden_states.shape[0]
SCREAMING_SNAKE_CASE_: Dict =[]
SCREAMING_SNAKE_CASE_: Tuple =[]
if self._is_main():
assert len(__lowerCAmelCase ) == world_size
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =self._main_retrieve(torch.cat(__lowerCAmelCase ).numpy() , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =torch.tensor(__lowerCAmelCase ), torch.tensor(__lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =self._chunk_tensor(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =self._chunk_tensor(__lowerCAmelCase , __lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =self._scattered(__lowerCAmelCase , [n_queries, n_docs] , target_type=torch.intaa )
SCREAMING_SNAKE_CASE_: Dict =self._scattered(__lowerCAmelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowerCAmelCase )
| 710
|
"""simple docstring"""
def and_gate(input_a, input_b):
    # The AND gate outputs 1 only when neither input is 0.
    return int((input_a, input_b).count(0) == 0)
def test_and_gate():
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 36
| 0
|
"""simple docstring"""
from math import factorial
def combinations(n, k):
    # If either of the conditions is true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("""Please enter positive integers for n and k where n >= k""")
    return factorial(n) // (factorial(k) * factorial(n - k))
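# Worked example (illustrative): combinations(5, 2) = 5! / (2! * 3!) = 10, and
# combinations(52, 5) = 2,598,960, which is the card-deck figure printed below.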
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f"""fifty-two card deck is: {combinations(5_2, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
f"""4 for group projects, there are {combinations(4_0, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f"""are {combinations(1_0, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
| 711
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def __magic_name__ ( lowercase , lowercase , lowercase ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""input_conv.weight_g"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[str] =checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: Union[str, Any] =checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[Any] =checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""output_conv.1.weight_g"""]
SCREAMING_SNAKE_CASE_: List[str] =checkpoint["""output_conv.1.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE_: List[Any] =SpeechTaHifiGanConfig.from_pretrained(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] =SpeechTaHifiGan(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch.load(lowercase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =np.load(lowercase )
SCREAMING_SNAKE_CASE_: Any =stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_: str =stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
model.save_pretrained(lowercase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
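# Example invocation (illustrative; the script file name and all paths are placeholders,
# the flags are the ones registered with argparse above):
#
#     python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#         --pytorch_dump_folder_path ./speecht5_hifigan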
| 36
| 0
|
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
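# Illustrative launch command (the file name is an assumption; use whatever name this script
# is saved under, after `accelerate config` has been run once):
#
#     accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16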
_UpperCAmelCase = 1_6
_UpperCAmelCase = 3_2
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase , lowercase = 16 ):
SCREAMING_SNAKE_CASE_: List[str] =AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE_: Any =DatasetDict(
{
"""train""": dataset["""train"""].select(_lowercase ),
"""validation""": dataset["""train"""].select(_lowercase ),
"""test""": dataset["""validation"""],
} )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: int =tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_: Union[str, Any] =datasets.map(
_lowercase , batched=_lowercase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE_: Tuple =tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE_: Dict =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE_: Optional[Any] =16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE_: str =8
else:
SCREAMING_SNAKE_CASE_: Any =None
return tokenizer.pad(
_lowercase , padding="""longest""" , max_length=_lowercase , pad_to_multiple_of=_lowercase , return_tensors="""pt""" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Optional[Any] =DataLoader(
tokenized_datasets["""train"""] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
SCREAMING_SNAKE_CASE_: int =DataLoader(
tokenized_datasets["""validation"""] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
SCREAMING_SNAKE_CASE_: str =DataLoader(
tokenized_datasets["""test"""] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader, test_dataloader
def __magic_name__ ( lowercase , lowercase ):
# New Code #
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
# Download the dataset
SCREAMING_SNAKE_CASE_: Dict =load_dataset("""glue""" , """mrpc""" )
# Create our splits
SCREAMING_SNAKE_CASE_: Union[str, Any] =StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
SCREAMING_SNAKE_CASE_: Union[str, Any] =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_: Optional[Any] =config['''lr''']
SCREAMING_SNAKE_CASE_: Optional[int] =int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE_: Dict =int(config["""seed"""] )
SCREAMING_SNAKE_CASE_: int =int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE_: str =evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE_: Optional[int] =1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE_: Optional[Any] =batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE_: int =MAX_GPU_BATCH_SIZE
set_seed(_lowercase )
# New Code #
# Create our folds:
SCREAMING_SNAKE_CASE_: Dict =kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
SCREAMING_SNAKE_CASE_: List[Any] =[]
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(_lowercase ):
SCREAMING_SNAKE_CASE_: int =get_fold_dataloaders(
_lowercase , _lowercase , _lowercase , _lowercase , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_: Tuple =AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE_: List[Any] =model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_: Any =AdamW(params=model.parameters() , lr=_lowercase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE_: int =get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=100 , num_training_steps=(len(_lowercase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_: Tuple =accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Now we train the model
for epoch in range(_lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(**_lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =outputs.loss
SCREAMING_SNAKE_CASE_: str =loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(**_lowercase )
SCREAMING_SNAKE_CASE_: Tuple =outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_: List[Any] =accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
SCREAMING_SNAKE_CASE_: Dict =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , _lowercase )
# New Code #
# We also run predictions on the test set at the very end
SCREAMING_SNAKE_CASE_: Dict =[]
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**_lowercase )
SCREAMING_SNAKE_CASE_: List[str] =outputs.logits
SCREAMING_SNAKE_CASE_: Dict =accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(_lowercase , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.cat(_lowercase , dim=0 )
SCREAMING_SNAKE_CASE_: int =torch.stack(_lowercase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
SCREAMING_SNAKE_CASE_: Optional[Any] =metric.compute(predictions=_lowercase , references=_lowercase )
accelerator.print("""Average test metrics from all folds:""" , _lowercase )
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Dict =argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=_lowercase , default=_lowercase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
# New Code #
parser.add_argument("""--num_folds""" , type=_lowercase , default=3 , help="""The number of splits to perform across the dataset""" )
SCREAMING_SNAKE_CASE_: Tuple =parser.parse_args()
SCREAMING_SNAKE_CASE_: Optional[int] ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
| 712
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __magic_name__ ( lowercase ):
if "cls_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def __magic_name__ ( lowercase , lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Optional[int] =orig_state_dict.pop(lowercase )
if "qkv" in key:
SCREAMING_SNAKE_CASE_: Dict =key.split(""".""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE_: int =config.decoder_hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] ="""decoder.decoder_layers."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Dict =val[:dim, :]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: str =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: List[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: List[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Any =config.hidden_size
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""vit.encoder.layer."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim, :]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: Dict =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Any =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Tuple =val
return orig_state_dict
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: List[Any] =1024
SCREAMING_SNAKE_CASE_: Dict =4096
SCREAMING_SNAKE_CASE_: Tuple =24
SCREAMING_SNAKE_CASE_: int =16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Union[str, Any] =14
SCREAMING_SNAKE_CASE_: Any =1280
SCREAMING_SNAKE_CASE_: Dict =5120
SCREAMING_SNAKE_CASE_: Optional[int] =32
SCREAMING_SNAKE_CASE_: Optional[Any] =16
SCREAMING_SNAKE_CASE_: Tuple =ViTMAEForPreTraining(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""model"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: str =convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple ="""https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
SCREAMING_SNAKE_CASE_: List[Any] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
SCREAMING_SNAKE_CASE_: int =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: int =image_processor(images=lowercase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Dict =torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
SCREAMING_SNAKE_CASE_: Any =torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
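# Example invocation (illustrative; the script name and the output folder are placeholders,
# the checkpoint URL is the default declared above):
#
#     python convert_vit_mae.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth \
#         --pytorch_dump_folder_path ./vit-mae-base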
| 36
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Any =ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(__UpperCamelCase )
# Let's go
SCREAMING_SNAKE_CASE_: List[str] =parser.parse_args()
if not hasattr(__UpperCamelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE_: Union[str, Any] =args.func(__UpperCamelCase )
service.run()
if __name__ == "__main__":
main()
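# Illustrative invocation (assumes the `diffusers-cli` entry point maps to this `main`, and that
# `EnvironmentCommand` registers an `env` subcommand as its name suggests):
#
#     diffusers-cli env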
| 713
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
| 0
|
"""simple docstring"""
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
_UpperCAmelCase = logging.getLogger()
_UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class a ( lowercase__ ):
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
os.makedirs(__lowercase , exist_ok=__lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] ={"""source""": """What is love ?""", """target""": """life"""}
SCREAMING_SNAKE_CASE_: List[str] ={"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
SCREAMING_SNAKE_CASE_: int ="""\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(__lowercase , f'''{split}.{field}''' ) , """w""" ) as f:
f.write(__lowercase )
def lowerCamelCase__ ( self : int , lowerCAmelCase : int , lowerCAmelCase : str = "pytorch" ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE_: Optional[int] =os.path.join(__lowercase , """output""" )
SCREAMING_SNAKE_CASE_: List[Any] =os.path.join(__lowercase , """data""" )
self._create_dummy_data(data_dir=__lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
SCREAMING_SNAKE_CASE_: Dict =[sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(__lowercase , env=self.get_env() )
SCREAMING_SNAKE_CASE_: Optional[int] =os.path.join(__lowercase , """metrics.json""" )
with open(__lowercase ) as f:
SCREAMING_SNAKE_CASE_: Union[str, Any] =json.load(__lowercase )
return result
@require_torch_gpu
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCamelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCamelCase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 714
|
"""simple docstring"""
def odd_even_sort( input_list ):
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
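# Example run (illustrative): entering "5 1 4 2 3" at the prompt prints [1, 2, 3, 4, 5].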
| 36
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class a ( __snake_case ):
UpperCamelCase : str = 'lilt'
def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=3_0522 , lowerCAmelCase : Dict=768 , lowerCAmelCase : List[str]=12 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : int=3072 , lowerCAmelCase : Optional[Any]="gelu" , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Any=512 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Optional[int]=0.0_2 , lowerCAmelCase : int=1E-12 , lowerCAmelCase : Tuple=0 , lowerCAmelCase : Union[str, Any]="absolute" , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[Any]=4 , lowerCAmelCase : List[str]=1024 , **lowerCAmelCase : Any , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=A_ , **A_ )
SCREAMING_SNAKE_CASE_: List[str] =vocab_size
SCREAMING_SNAKE_CASE_: Union[str, Any] =hidden_size
SCREAMING_SNAKE_CASE_: Dict =num_hidden_layers
SCREAMING_SNAKE_CASE_: Dict =num_attention_heads
SCREAMING_SNAKE_CASE_: Any =hidden_act
SCREAMING_SNAKE_CASE_: Dict =intermediate_size
SCREAMING_SNAKE_CASE_: List[Any] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: int =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =max_position_embeddings
SCREAMING_SNAKE_CASE_: Any =type_vocab_size
SCREAMING_SNAKE_CASE_: Tuple =initializer_range
SCREAMING_SNAKE_CASE_: List[str] =layer_norm_eps
SCREAMING_SNAKE_CASE_: int =position_embedding_type
SCREAMING_SNAKE_CASE_: List[str] =classifier_dropout
SCREAMING_SNAKE_CASE_: int =channel_shrink_ratio
SCREAMING_SNAKE_CASE_: str =max_ad_position_embeddings
| 715
|
"""simple docstring"""
def is_palindrome(num):
    return str(num) == str(num)[::-1]
def sum_reverse(num):
    return int(num) + int(str(num)[::-1])
def solution(limit=1_0000):
    # Count how many numbers below `limit` never produce a palindrome within
    # fifty reverse-and-add iterations (candidate Lychrel numbers).
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        current = num
        while iterations < 50:
            current = sum_reverse(current)
            iterations += 1
            if is_palindrome(current):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
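# Illustrative walk-through: 47 + 74 = 121 is a palindrome after a single reverse-and-add
# step, so 47 is not counted, while 196 never reaches a palindrome within the fifty allowed
# iterations and is therefore counted as a candidate Lychrel number.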
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 0
|
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
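# Convert an original T5X/Flax Pix2Struct checkpoint into a Hugging Face PyTorch checkpoint:
# load the Flax params, rename/transpose them into the HF layout, then save the model together
# with its processor.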
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =checkpoints.load_tax_checkpoint(lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =flatten_dict(lowercase )
return flax_params
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int ={}
SCREAMING_SNAKE_CASE_: Optional[Any] ={
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
SCREAMING_SNAKE_CASE_: Dict ={
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
SCREAMING_SNAKE_CASE_: int =""".""".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
SCREAMING_SNAKE_CASE_: List[Any] =new_key.replace(lowercase , lowercase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
SCREAMING_SNAKE_CASE_: List[Any] =new_key.replace(lowercase , lowercase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
SCREAMING_SNAKE_CASE_: Dict =re.sub(R"""layers_(\d+)""" , R"""layer.\1""" , lowercase )
SCREAMING_SNAKE_CASE_: Dict =new_key.replace("""encoder""" , """encoder.encoder""" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
SCREAMING_SNAKE_CASE_: Optional[int] =re.sub(R"""layers_(\d+)""" , R"""layer.\1""" , lowercase )
SCREAMING_SNAKE_CASE_: Tuple =flax_dict[key]
SCREAMING_SNAKE_CASE_: str ={}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
SCREAMING_SNAKE_CASE_: int =torch.from_numpy(converted_dict[key].T )
else:
SCREAMING_SNAKE_CASE_: List[Any] =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def __magic_name__ ( lowercase , lowercase , lowercase=False , lowercase=False ):
SCREAMING_SNAKE_CASE_: Optional[int] =get_flax_param(lowercase )
if not use_large:
SCREAMING_SNAKE_CASE_: str =PixaStructVisionConfig()
SCREAMING_SNAKE_CASE_: Optional[int] =PixaStructTextConfig()
else:
SCREAMING_SNAKE_CASE_: List[Any] =PixaStructVisionConfig(
hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
SCREAMING_SNAKE_CASE_: List[Any] =PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=lowercase )
SCREAMING_SNAKE_CASE_: Tuple =PixaStructForConditionalGeneration(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =rename_and_convert_flax_params(lowercase )
model.load_state_dict(lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" )
SCREAMING_SNAKE_CASE_: Dict =PixaStructImageProcessor()
SCREAMING_SNAKE_CASE_: str =PixaStructProcessor(image_processor=lowercase , tokenizer=lowercase )
if use_large:
SCREAMING_SNAKE_CASE_: Tuple =4096
SCREAMING_SNAKE_CASE_: Dict =True
# mkdir if needed
os.makedirs(lowercase , exist_ok=lowercase )
model.save_pretrained(lowercase )
processor.save_pretrained(lowercase )
print("""Model saved in {}""".format(lowercase ) )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
_UpperCAmelCase = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 716
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""DPTFeatureExtractor"""]
_UpperCAmelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 717
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
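# AVL self-balancing binary search tree: a minimal array-backed queue is used for level-order
# printing, and rotations keep the heights of sibling subtrees within one of each other.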
class a :
def __init__( self : str ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: list[Any] =[]
SCREAMING_SNAKE_CASE_: int =0
SCREAMING_SNAKE_CASE_: int =0
def lowerCamelCase__ ( self : Optional[Any] ) -> bool:
'''simple docstring'''
return self.head == self.tail
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
self.data.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =self.tail + 1
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.data[self.head]
SCREAMING_SNAKE_CASE_: Optional[int] =self.head + 1
return ret
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.tail - self.head
def lowerCamelCase__ ( self : str ) -> None:
'''simple docstring'''
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =data
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: int =1
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return self.data
def lowerCamelCase__ ( self : List[Any] ) -> MyNode | None:
'''simple docstring'''
return self.left
def lowerCamelCase__ ( self : Dict ) -> MyNode | None:
'''simple docstring'''
return self.right
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
return self.height
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =data
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =node
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =node
def lowerCamelCase__ ( self : int , lowerCAmelCase : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =height
def __magic_name__ ( lowercase ):
if node is None:
return 0
return node.get_height()
def __magic_name__ ( lowercase , lowercase ):
if a > b:
return a
return b
def __magic_name__ ( lowercase ):
print("""left rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: int =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
print("""right rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
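# Left-right (LR) double rotation: first rotate the node's left child, then rotate the node
# itself; the RL case below mirrors it on the right side.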
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowercase ) )
return right_rotation(lowercase )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowercase ) )
return left_rotation(lowercase )
def __magic_name__ ( lowercase , lowercase ):
if node is None:
return MyNode(lowercase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowercase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
SCREAMING_SNAKE_CASE_: Union[str, Any] =node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
SCREAMING_SNAKE_CASE_: Any =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: List[Any] =lr_rotation(lowercase )
else:
node.set_right(insert_node(node.get_right() , lowercase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
SCREAMING_SNAKE_CASE_: Tuple =node.get_right()
assert right_child is not None
if data < right_child.get_data():
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =left_rotation(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
return node
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: Dict =root.get_right()
if right_child is None:
break
SCREAMING_SNAKE_CASE_: str =right_child
return root.get_data()
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: str =root.get_left()
if left_child is None:
break
SCREAMING_SNAKE_CASE_: Dict =left_child
return root.get_data()
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str =root.get_left()
SCREAMING_SNAKE_CASE_: List[Any] =root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_left_most(lowercase )
root.set_data(lowercase )
root.set_right(del_node(lowercase , lowercase ) )
elif left_child is not None:
SCREAMING_SNAKE_CASE_: Optional[int] =left_child
elif right_child is not None:
SCREAMING_SNAKE_CASE_: Any =right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(lowercase , lowercase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowercase , lowercase ) )
if get_height(lowercase ) - get_height(lowercase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
SCREAMING_SNAKE_CASE_: Tuple =left_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
elif get_height(lowercase ) - get_height(lowercase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
SCREAMING_SNAKE_CASE_: Optional[Any] =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: str =lr_rotation(lowercase )
SCREAMING_SNAKE_CASE_: str =my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowercase )
return root
class a :
def __init__( self : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: MyNode | None =None
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
return get_height(self.root )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""insert:""" + str(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Tuple =insert_node(self.root , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""delete:""" + str(lowerCAmelCase ) )
if self.root is None:
print("""Tree is empty!""" )
return
SCREAMING_SNAKE_CASE_: Union[str, Any] =del_node(self.root , lowerCAmelCase )
    def __str__( self : List[str] , ) -> str: # a level-order traversal gives a more intuitive look at the tree
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =""""""
SCREAMING_SNAKE_CASE_: str =MyQueue()
q.push(self.root )
SCREAMING_SNAKE_CASE_: List[str] =self.get_height()
if layer == 0:
return output
SCREAMING_SNAKE_CASE_: int =0
while not q.is_empty():
SCREAMING_SNAKE_CASE_: int =q.pop()
SCREAMING_SNAKE_CASE_: List[Any] =""" """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(lowerCAmelCase )
q.push(lowerCAmelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
SCREAMING_SNAKE_CASE_: List[Any] =cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , lowerCAmelCase ) - 1:
SCREAMING_SNAKE_CASE_: int =layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __magic_name__ ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
_UpperCAmelCase = AVLtree()
_UpperCAmelCase = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 36
| 0
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
debug_launcher(test_script.main )
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
debug_launcher(test_ops.main )
| 718
|
"""simple docstring"""
import string
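# Atbash cipher: each letter maps to its mirror in the alphabet (A<->Z, B<->Y, ...); the second
# implementation below uses reversed-alphabet lookups and is benchmarked against the first.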
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =""""""
for i in sequence:
SCREAMING_SNAKE_CASE_: List[Any] =ord(lowercase )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =string.ascii_letters
SCREAMING_SNAKE_CASE_: Tuple =string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowercase )] if c in letters else c for c in sequence )
def __magic_name__ ( ):
from timeit import timeit
print("""Running performance benchmarks...""" )
SCREAMING_SNAKE_CASE_: int ="""from string import printable ; from __main__ import atbash, atbash_slow"""
print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowercase )} seconds''' )
print(f'''> atbash(): {timeit("atbash(printable)" , setup=lowercase )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 36
| 0
|