| code (string, length 81–54k) | code_codestyle (int64, 0–721) | style_context (string, length 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph, vert, visited):
    """Depth-first search; return the vertices reachable from `vert` in post-order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph, vert, visited):
    """Depth-first search on the reversed graph; collect one strongly connected component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph):
    """Kosaraju's two-pass algorithm: return the strongly connected components of `graph`."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
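
# A minimal demo of the two test graphs above (the restored names are
# reconstructions; component ordering may vary by traversal order):
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))  # e.g. [[0, 1, 2], [3], [4]]
    print(strongly_connected_components(test_graph_2))  # e.g. [[0, 1, 2], [3, 5, 4]]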
| 719
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=3 , lowerCAmelCase : Optional[Any]=64 , lowerCAmelCase : Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =np.random.default_rng(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =length
SCREAMING_SNAKE_CASE_: Union[str, Any] =rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_: Tuple =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ) -> str:
'''simple docstring'''
return self.length
def __getitem__( self : Union[str, Any] , lowerCAmelCase : Any ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : str=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Optional[int]=False ) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: Dict =True
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tuple=None ) -> int:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
def __init__( self : Union[str, Any] , lowerCAmelCase : Any=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : List[Any]=False ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[str] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: List[Any] =True
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : int=None ) -> Any:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Optional[int] =False
return x * self.a + self.b
def __magic_name__ ( lowercase , lowercase = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE_: Optional[int] ={"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
SCREAMING_SNAKE_CASE_: Any =load_dataset("""csv""" , data_files=lowercase )
SCREAMING_SNAKE_CASE_: Any =datasets["""train"""].unique("""label""" )
SCREAMING_SNAKE_CASE_: List[Any] ={v: i for i, v in enumerate(lowercase )}
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Dict =tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase , padding="""max_length""" )
if "label" in examples:
SCREAMING_SNAKE_CASE_: Optional[int] =[label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_: List[Any] =datasets.map(
lowercase , batched=lowercase , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(tokenized_datasets["""train"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=2 )
SCREAMING_SNAKE_CASE_: Dict =DataLoader(tokenized_datasets["""validation"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=1 )
return train_dataloader, eval_dataloader
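
# A minimal usage sketch (assumes an `accelerate` environment and the MRPC test
# CSVs referenced above; the restored names are reconstructions):
if __name__ == "__main__":
    from accelerate import Accelerator

    accelerator = Accelerator()
    train_dataloader, eval_dataloader = mocked_dataloaders(accelerator, batch_size=16)
    model = accelerator.prepare(RegressionModel(a=2, b=3))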
| 36
| 0
|
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with"
                f" MaskFormer. Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> "MaskFormerConfig":
        """Instantiate a config from a backbone config and a decoder config."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
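
# A short sketch of the default behaviour (the restored class name is assumed):
#
#     >>> config = MaskFormerConfig()  # falls back to a Swin backbone + DETR decoder
#     >>> config.backbone_config.model_type
#     'swin'
#     >>> config.decoder_config.model_type
#     'detr'
#     >>> config.hidden_size == config.mask_feature_size  # via attribute_map
#     True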
| 720
|
"""simple docstring"""
def __magic_name__ ( lowercase ):
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
SCREAMING_SNAKE_CASE_: Tuple =[0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
SCREAMING_SNAKE_CASE_: Any =1
if upper_limit > 0:
SCREAMING_SNAKE_CASE_: List[str] =1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowercase ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
_UpperCAmelCase = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 36
| 0
|
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( lowercase , lowercase , lowercase , ):
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif stress < 0:
raise ValueError("""Stress cannot be negative""" )
elif tangential_force < 0:
raise ValueError("""Tangential Force cannot be negative""" )
elif area < 0:
raise ValueError("""Area cannot be negative""" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
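
# A minimal usage sketch (the restored name `shear_stress` is an assumption);
# pass 0 for the quantity to solve for:
#
#     >>> shear_stress(stress=25, tangential_force=100, area=0)
#     ('area', 4.0)
#     >>> shear_stress(stress=0, tangential_force=1600, area=200)
#     ('stress', 8.0)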
| 721
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
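
# A short sketch (restored names assumed); the defaults above mirror albert-xxlarge:
#
#     >>> config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
#     >>> onnx_config = AlbertOnnxConfig(config)
#     >>> list(onnx_config.inputs)
#     ['input_ids', 'attention_mask', 'token_type_ids']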
| 36
| 0
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
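
# A hedged usage sketch, assuming this is diffusers' DanceDiffusionPipeline
# ("harmonai/maestro-150k" is one public Dance Diffusion checkpoint):
#
#     >>> from diffusers import DanceDiffusionPipeline
#     >>> pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     >>> output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
#     >>> audio = output.audios[0]  # numpy array, shape (channels, samples)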
| 700
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a ( yaml.SafeLoader ):
def lowerCamelCase__ ( self : int , lowerCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =[self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_: Any =[tuple(lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else key for key in keys]
SCREAMING_SNAKE_CASE_: Dict =Counter(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =[key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=False ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =super().construct_mapping(lowerCAmelCase , deep=lowerCAmelCase )
self._check_no_duplicates_on_constructed_node(lowerCAmelCase )
return mapping
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE_: Union[str, Any] =full_content[1:].index("""---""" ) + 1
SCREAMING_SNAKE_CASE_: List[str] ="""\n""".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowercase )
class a ( UpperCAmelCase__ ):
# class attributes
UpperCamelCase : Tuple = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def lowerCamelCase__ ( cls : List[Any] , lowerCAmelCase : Path ) -> "DatasetMetadata":
'''simple docstring'''
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =_split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(lowerCAmelCase )
else:
return cls()
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Path ) -> List[str]:
'''simple docstring'''
if path.exists():
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_: str =readme_file.read()
else:
SCREAMING_SNAKE_CASE_: str =None
SCREAMING_SNAKE_CASE_: Tuple =self._to_readme(lowerCAmelCase )
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as readme_file:
readme_file.write(lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Optional[str] = None ) -> str:
'''simple docstring'''
if readme_content is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =_split_yaml_from_readme(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] ="""---\n""" + self.to_yaml_string() + """---\n""" + content
else:
SCREAMING_SNAKE_CASE_: List[Any] ="""---\n""" + self.to_yaml_string() + """---\n"""
return full_content
@classmethod
def lowerCamelCase__ ( cls : Optional[int] , lowerCAmelCase : str ) -> "DatasetMetadata":
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =yaml.load(lowerCAmelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_: List[Any] ={
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**lowerCAmelCase )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=lowerCAmelCase , allow_unicode=lowerCAmelCase , encoding="""utf-8""" , ).decode("""utf-8""" )
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 36
| 0
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """Rename the patch-embedding weights of stage `idx`."""
    embed = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    """Rename the attention and MLP weights of block `cnt` in stage `idx`."""
    attention_weights = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    """Rename the cls_token weights."""
    token = []
    token.append((f'''cvt.encoder.stages.{idx}.cls_token''', """stage2.cls_token"""))
    return token


def final():
    """Rename the final layernorm and classification-head weights."""
    head = []
    head.append(("""layernorm.weight""", """norm.weight"""))
    head.append(("""layernorm.bias""", """norm.bias"""))
    head.append(("""classifier.weight""", """head.weight"""))
    head.append(("""classifier.bias""", """head.bias"""))
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Convert the original Microsoft CvT checkpoint into a Hugging Face checkpoint."""
    img_labels_file = """imagenet-1k-id2label.json"""
    num_labels = 1000
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="""dataset""")), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("""/""", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("""/""", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""")
    image_processor.size["""shortest_edge"""] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("""cpu"""))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--cvt_model""",
        default="""cvt-w24""",
        type=str,
        help="""Name of the cvt model you'd like to convert.""",
    )
    parser.add_argument(
        """--image_size""",
        default=384,
        type=int,
        help="""Input Image Size""",
    )
    parser.add_argument(
        """--cvt_file_name""",
        default=r"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
        type=str,
        help="""Path to the original CvT checkpoint file.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )

    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 701
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __magic_name__ ( lowercase ):
return (data["data"], data["target"])
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =XGBClassifier()
classifier.fit(lowercase , lowercase )
return classifier
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Optional[Any] =load_iris()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =data_handling(lowercase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =train_test_split(
lowercase , lowercase , test_size=0.25 )
SCREAMING_SNAKE_CASE_: Tuple =iris["""target_names"""]
# Create an XGBoost Classifier from the training data
SCREAMING_SNAKE_CASE_: Optional[int] =xgboost(lowercase , lowercase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
lowercase , lowercase , lowercase , display_labels=lowercase , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 36
| 0
|
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( lowercase = 4 ):
SCREAMING_SNAKE_CASE_: List[Any] =abs(lowerCAmelCase_ ) or 4
return [[1 + x + y * row_size for x in range(lowerCAmelCase_ )] for y in range(lowerCAmelCase_ )]
def __magic_name__ ( lowercase ):
return reverse_row(transpose(lowerCAmelCase_ ) )
# OR.. transpose(reverse_column(matrix))
def __magic_name__ ( lowercase ):
return reverse_row(reverse_column(lowerCAmelCase_ ) )
# OR.. reverse_column(reverse_row(matrix))
def __magic_name__ ( lowercase ):
return reverse_column(transpose(lowerCAmelCase_ ) )
# OR.. transpose(reverse_row(matrix))
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =[list(lowerCAmelCase_ ) for x in zip(*lowerCAmelCase_ )]
return matrix
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =matrix[::-1]
return matrix
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =[x[::-1] for x in matrix]
return matrix
def __magic_name__ ( lowercase ):
for i in matrix:
print(*lowerCAmelCase_ )
if __name__ == "__main__":
_UpperCAmelCase = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 90 counterclockwise:\n""")
print_matrix(rotate_aa(matrix))
_UpperCAmelCase = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 180:\n""")
print_matrix(rotate_aaa(matrix))
_UpperCAmelCase = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 270 counterclockwise:\n""")
print_matrix(rotate_aaa(matrix))
| 702
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
SCREAMING_SNAKE_CASE_: List[str] =[]
SCREAMING_SNAKE_CASE_: Any =[]
for rt in rc.restypes:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
SCREAMING_SNAKE_CASE_: Any ={name: i for i, name in enumerate(lowercase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor(
lowercase , dtype=torch.floataa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Any =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: Tuple =residx_atomaa_mask
SCREAMING_SNAKE_CASE_: Dict =residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
SCREAMING_SNAKE_CASE_: Dict =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Optional[int] =residx_atomaa_to_atomaa.long()
# create the corresponding mask
SCREAMING_SNAKE_CASE_: Optional[int] =torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
SCREAMING_SNAKE_CASE_: int =rc.restype_atoa[restype_letter]
SCREAMING_SNAKE_CASE_: Any =rc.residue_atoms[restype_name]
for atom_name in atom_names:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.atom_order[atom_name]
SCREAMING_SNAKE_CASE_: Dict =1
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: List[Any] =residx_atomaa_mask
return protein
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =tree_map(lambda lowercase : torch.tensor(lowercase , device=batch["""aatype"""].device ) , lowercase , np.ndarray )
SCREAMING_SNAKE_CASE_: int =tensor_tree_map(lambda lowercase : np.array(lowercase ) , make_atomaa_masks(lowercase ) )
return out
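
# A hedged usage sketch: only an `aatype` tensor of per-residue amino-acid
# indices is required (index 0 is alanine in rc.restypes); the function adds
# the index/mask tensors to the dict in place:
#
#     >>> protein = {"aatype": torch.zeros(10, dtype=torch.long)}
#     >>> protein = make_atom14_masks(protein)
#     >>> protein["residx_atom14_to_atom37"].shape
#     torch.Size([10, 14])
#     >>> protein["atom37_atom_exists"].shape
#     torch.Size([10, 37])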
| 36
| 0
|
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None


def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest_case = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest_case = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper


@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which are supposed to download a model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 703
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =[]
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(lowercase , lowercase ):
inputs.append(create_inputs(lowercase ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =[]
for output in outputs:
if isinstance(lowercase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(lowercase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(lowercase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
@is_tool_test
class a :
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] =self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
SCREAMING_SNAKE_CASE_: Any =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: List[Any] =self.tool(*lowerCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
SCREAMING_SNAKE_CASE_: str =[outputs]
self.assertListEqual(output_types(lowerCAmelCase ) , self.tool.outputs )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Tuple =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase , self.tool.outputs ):
SCREAMING_SNAKE_CASE_: int =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for _input, input_type in zip(lowerCAmelCase , self.tool.inputs ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
SCREAMING_SNAKE_CASE_: Dict =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
| 36
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
"""configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""MaskFormerFeatureExtractor"""]
_UpperCAmelCase = ["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_maskformer"""] = [
"""MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MaskFormerForInstanceSegmentation""",
"""MaskFormerModel""",
"""MaskFormerPreTrainedModel""",
]
    _import_structure["""modeling_maskformer_swin"""] = [
"""MaskFormerSwinBackbone""",
"""MaskFormerSwinModel""",
"""MaskFormerSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
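
# A short sketch of what the lazy module enables (assuming the usual transformers
# layout): submodules are imported only when an attribute is first accessed.
#
#     >>> from transformers.models.maskformer import MaskFormerConfig  # triggers configuration_maskformer
#     >>> config = MaskFormerConfig()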
| 704
|
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =sorted(numsa + numsa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =divmod(len(lowercase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of first array: """).split()]
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of second array: """).split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 36
| 0
|
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCamelCase : List[str] = "encodec"
def __init__( self : List[str] , lowerCAmelCase : int=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , lowerCAmelCase : Tuple=2_4000 , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : str=None , lowerCAmelCase : Dict=None , lowerCAmelCase : str=128 , lowerCAmelCase : Any=32 , lowerCAmelCase : Any=1 , lowerCAmelCase : List[Any]=[8, 5, 4, 2] , lowerCAmelCase : Union[str, Any]="weight_norm" , lowerCAmelCase : str=7 , lowerCAmelCase : Optional[int]=7 , lowerCAmelCase : Any=3 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[str]="reflect" , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Union[str, Any]=1.0 , lowerCAmelCase : Optional[Any]=1024 , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : str=True , **lowerCAmelCase : str , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =target_bandwidths
SCREAMING_SNAKE_CASE_: List[str] =sampling_rate
SCREAMING_SNAKE_CASE_: int =audio_channels
SCREAMING_SNAKE_CASE_: List[str] =normalize
SCREAMING_SNAKE_CASE_: Any =chunk_length_s
SCREAMING_SNAKE_CASE_: Dict =overlap
SCREAMING_SNAKE_CASE_: Union[str, Any] =hidden_size
SCREAMING_SNAKE_CASE_: Dict =num_filters
SCREAMING_SNAKE_CASE_: Dict =num_residual_layers
SCREAMING_SNAKE_CASE_: Dict =upsampling_ratios
SCREAMING_SNAKE_CASE_: str =norm_type
SCREAMING_SNAKE_CASE_: Union[str, Any] =kernel_size
SCREAMING_SNAKE_CASE_: Union[str, Any] =last_kernel_size
SCREAMING_SNAKE_CASE_: Union[str, Any] =residual_kernel_size
SCREAMING_SNAKE_CASE_: List[Any] =dilation_growth_rate
SCREAMING_SNAKE_CASE_: Dict =use_causal_conv
SCREAMING_SNAKE_CASE_: Optional[Any] =pad_mode
SCREAMING_SNAKE_CASE_: str =compress
SCREAMING_SNAKE_CASE_: str =num_lstm_layers
SCREAMING_SNAKE_CASE_: Union[str, Any] =trim_right_ratio
SCREAMING_SNAKE_CASE_: Any =codebook_size
SCREAMING_SNAKE_CASE_: List[Any] =codebook_dim if codebook_dim is not None else hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] =use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'''self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`, got {self.norm_type}''' )
super().__init__(**lowerCAmelCase )
@property
def lowerCamelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def lowerCamelCase__ ( self : int ) -> int:
'''simple docstring'''
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
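# Hedged worked example (added for illustration): the derived properties above,
# evaluated for the default 24 kHz configuration. hop_length is the product of the
# upsampling ratios, i.e. how many input samples one encoder frame covers.
_sampling_rate = 24_000
_target_bandwidths = [1.5, 3.0, 6.0, 12.0, 24.0]

_hop_length = 8 * 5 * 4 * 2                                    # 320 samples per frame
_frame_rate = math.ceil(_sampling_rate / _hop_length)          # 75 frames per second
_num_quantizers = int(1000 * _target_bandwidths[-1] // (_frame_rate * 10))  # 32 codebooks
assert (_hop_length, _frame_rate, _num_quantizers) == (320, 75, 32)
# chunk_length and chunk_stride are None here because chunk_length_s defaults to None.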
| 705
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str]=13 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=224 , lowerCAmelCase : List[str]=1000 , lowerCAmelCase : Optional[Any]=[3, 3, 6, 4] , lowerCAmelCase : int=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =parent
SCREAMING_SNAKE_CASE_: Any =batch_size
SCREAMING_SNAKE_CASE_: Tuple =num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] =is_training
SCREAMING_SNAKE_CASE_: Tuple =use_labels
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =num_labels
SCREAMING_SNAKE_CASE_: int =image_size
SCREAMING_SNAKE_CASE_: Optional[Any] =layer_depths
SCREAMING_SNAKE_CASE_: List[Any] =embed_dims
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_: Tuple =self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1E-5 , )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Any =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.num_labels
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE_: int =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): str =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_: Tuple ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[int] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCamelCase : Tuple = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Any = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Dict = False
UpperCamelCase : List[str] = False
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Any =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Tuple =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[Any] =SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_: Optional[Any] =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[str] =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Dict =outputs.hidden_states
SCREAMING_SNAKE_CASE_: List[Any] =8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
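            # Illustration (added): with the tester defaults image_size=224 and
            # embed_dims=[48, 56, 112, 220], the 8 hidden states come out as
            # (B, 48, 56, 56), (B, 48, 56, 56), (B, 56, 28, 28), (B, 56, 28, 28),
            # (B, 112, 14, 14), (B, 112, 14, 14), (B, 220, 7, 7), (B, 220, 7, 7),
            # i.e. (image_size // 4) // 2 ** (i // 2) along each spatial axis.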
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Any =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
def _config_zero_init(lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_: Dict =copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1E-10 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =_config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: List[Any] =_config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: int =prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
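# Hedged usage sketch (added), distilled from the integration test above: end-to-end
# image classification with the "MBZUAI/swiftformer-xs" checkpoint and the COCO
# fixture image used throughout these tests. Guarded because torch/vision are
# optional dependencies in this file.
if is_torch_available() and is_vision_available():
    def _run_swiftformer_example():
        processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(images=image, return_tensors="pt")
        with torch.no_grad():
            logits = model(**inputs).logits  # shape (1, 1000)
        return logits.argmax(-1).item()      # predicted ImageNet class id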
| 36
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class a ( _UpperCamelCase ):
def __init__( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any]=13 , lowerCAmelCase : int=7 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[str]=False , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : Union[str, Any]=32 , lowerCAmelCase : Optional[int]=5 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : List[str]=37 , lowerCAmelCase : Optional[int]="gelu" , lowerCAmelCase : Any=0.1 , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : Union[str, Any]=512 , lowerCAmelCase : Tuple=16 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Tuple=3 , lowerCAmelCase : Dict=4 , lowerCAmelCase : List[Any]=None , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =parent
SCREAMING_SNAKE_CASE_: Tuple =batch_size
SCREAMING_SNAKE_CASE_: Optional[int] =seq_length
SCREAMING_SNAKE_CASE_: Optional[Any] =is_training
SCREAMING_SNAKE_CASE_: int =use_input_mask
SCREAMING_SNAKE_CASE_: Any =use_token_type_ids
SCREAMING_SNAKE_CASE_: Any =use_labels
SCREAMING_SNAKE_CASE_: Any =vocab_size
SCREAMING_SNAKE_CASE_: Optional[Any] =hidden_size
SCREAMING_SNAKE_CASE_: List[Any] =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_attention_heads
SCREAMING_SNAKE_CASE_: List[str] =intermediate_size
SCREAMING_SNAKE_CASE_: Any =hidden_act
SCREAMING_SNAKE_CASE_: Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: str =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[str] =max_position_embeddings
SCREAMING_SNAKE_CASE_: Optional[int] =type_vocab_size
SCREAMING_SNAKE_CASE_: Dict =type_sequence_label_size
SCREAMING_SNAKE_CASE_: Optional[Any] =initializer_range
SCREAMING_SNAKE_CASE_: Union[str, Any] =num_labels
SCREAMING_SNAKE_CASE_: Optional[Any] =num_choices
SCREAMING_SNAKE_CASE_: int =scope
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_: Union[str, Any] =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_: Optional[Any] =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_: Any =None
SCREAMING_SNAKE_CASE_: List[Any] =None
SCREAMING_SNAKE_CASE_: Tuple =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_: Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_: Optional[int] =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =DistilBertModel(config=__a )
model.to(__a )
model.eval()
SCREAMING_SNAKE_CASE_: Dict =model(__a , __a )
SCREAMING_SNAKE_CASE_: str =model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =DistilBertForMaskedLM(config=__a )
model.to(__a )
model.eval()
SCREAMING_SNAKE_CASE_: Optional[Any] =model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : int , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =DistilBertForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(
__a , attention_mask=__a , start_positions=__a , end_positions=__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =self.num_labels
SCREAMING_SNAKE_CASE_: List[str] =DistilBertForSequenceClassification(__a )
model.to(__a )
model.eval()
SCREAMING_SNAKE_CASE_: List[Any] =model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =self.num_labels
SCREAMING_SNAKE_CASE_: Dict =DistilBertForTokenClassification(config=__a )
model.to(__a )
model.eval()
SCREAMING_SNAKE_CASE_: Optional[int] =model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Dict , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.num_choices
SCREAMING_SNAKE_CASE_: List[Any] =DistilBertForMultipleChoice(config=__a )
model.to(__a )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE_: str =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE_: int =model(
__a , attention_mask=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =self.prepare_config_and_inputs()
(SCREAMING_SNAKE_CASE_): List[str] =config_and_inputs
SCREAMING_SNAKE_CASE_: Tuple ={"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
UpperCamelCase : Any = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase : Dict = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase : List[str] = True
UpperCamelCase : Optional[int] = True
UpperCamelCase : Optional[Any] = True
UpperCamelCase : str = True
def lowerCamelCase__ ( self : Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =DistilBertModelTester(self )
SCREAMING_SNAKE_CASE_: Optional[Any] =ConfigTester(self , config_class=__a , dim=37 )
def lowerCamelCase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__a )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__a )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )
def lowerCamelCase__ ( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__a )
def lowerCamelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )
@slow
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[int] =DistilBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@slow
@require_torch_gpu
def lowerCamelCase__ ( self : str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments,
            # so skip it and continue with the remaining model classes.
            if model_class == DistilBertForMultipleChoice:
                continue
SCREAMING_SNAKE_CASE_: Tuple =True
SCREAMING_SNAKE_CASE_: Tuple =model_class(config=__a )
SCREAMING_SNAKE_CASE_: int =self._prepare_for_class(__a , __a )
SCREAMING_SNAKE_CASE_: str =torch.jit.trace(
__a , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__a , os.path.join(__a , """traced_model.pt""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.jit.load(os.path.join(__a , """traced_model.pt""" ) , map_location=__a )
loaded(inputs_dict["""input_ids"""].to(__a ) , inputs_dict["""attention_mask"""].to(__a ) )
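# Hedged sketch (added for illustration): the TorchScript round trip exercised by
# the GPU test above, reduced to its essentials. Assumes config.torchscript=True
# (the flag the test flips) so the traced model returns plain tuples; the save
# path is an example only.
def _trace_save_load(model, input_ids, attention_mask, path="traced_model.pt"):
    model.eval()
    traced = torch.jit.trace(model, (input_ids, attention_mask))
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path)  # pass map_location=... to move across devices
    return loaded(input_ids, attention_mask)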
@require_torch
class a ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
SCREAMING_SNAKE_CASE_: int =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
SCREAMING_SNAKE_CASE_: Any =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Any =model(__a , attention_mask=__a )[0]
SCREAMING_SNAKE_CASE_: str =torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __a )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.tensor(
[[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) )
| 706
|
"""simple docstring"""
from math import pi
def __magic_name__ ( lowercase , lowercase ):
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
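# Worked example (added): arc length is the fraction of the circumference swept by
# the angle, L = 2 * pi * r * (angle / 360). For angle = 90 and radius = 10 this is
# a quarter circle, 2 * pi * 10 / 4 = 5 * pi ~= 15.708 -- the value printed above.
assert abs(2 * pi * 10 * (90 / 360) - 5 * pi) < 1e-12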
| 36
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
def __init__( self : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple=13 , lowerCAmelCase : str=7 , lowerCAmelCase : Any=True , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : Optional[Any]=99 , lowerCAmelCase : Union[str, Any]=32 , lowerCAmelCase : Optional[int]=5 , lowerCAmelCase : Tuple=4 , lowerCAmelCase : List[Any]=37 , lowerCAmelCase : Tuple="gelu" , lowerCAmelCase : int=0.1 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=128 , lowerCAmelCase : int=32 , lowerCAmelCase : int=16 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Dict=0.0_2 , lowerCAmelCase : str=3 , lowerCAmelCase : Optional[Any]=4 , lowerCAmelCase : Optional[int]=None , ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =parent
SCREAMING_SNAKE_CASE_: Dict =batch_size
SCREAMING_SNAKE_CASE_: List[str] =seq_length
SCREAMING_SNAKE_CASE_: Dict =is_training
SCREAMING_SNAKE_CASE_: Union[str, Any] =use_input_mask
SCREAMING_SNAKE_CASE_: Union[str, Any] =use_token_type_ids
SCREAMING_SNAKE_CASE_: int =use_labels
SCREAMING_SNAKE_CASE_: List[str] =vocab_size
SCREAMING_SNAKE_CASE_: Tuple =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Union[str, Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: List[Any] =intermediate_size
SCREAMING_SNAKE_CASE_: Tuple =hidden_act
SCREAMING_SNAKE_CASE_: Dict =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: int =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int =max_position_embeddings
SCREAMING_SNAKE_CASE_: int =type_vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =type_sequence_label_size
SCREAMING_SNAKE_CASE_: Dict =initializer_range
SCREAMING_SNAKE_CASE_: List[Any] =num_labels
SCREAMING_SNAKE_CASE_: List[str] =num_choices
SCREAMING_SNAKE_CASE_: Tuple =scope
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_: Dict =None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_: Optional[Any] =random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_: List[str] =None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_: int =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_: List[str] =None
SCREAMING_SNAKE_CASE_: List[str] =None
SCREAMING_SNAKE_CASE_: Dict =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: Optional[int] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_: Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_: Dict =ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_: List[str] =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
        ((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): List[Any] =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_: Tuple =True
SCREAMING_SNAKE_CASE_: int =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =NezhaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(_snake_case , token_type_ids=_snake_case )
SCREAMING_SNAKE_CASE_: Any =model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : int , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =True
SCREAMING_SNAKE_CASE_: List[str] =NezhaModel(_snake_case )
model.to(_snake_case )
model.eval()
SCREAMING_SNAKE_CASE_: Any =model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , )
SCREAMING_SNAKE_CASE_: Dict =model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , encoder_hidden_states=_snake_case , )
SCREAMING_SNAKE_CASE_: Any =model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self : str , lowerCAmelCase : Dict , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Dict , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =NezhaForMaskedLM(config=_snake_case )
model.to(_snake_case )
model.eval()
SCREAMING_SNAKE_CASE_: int =model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =NezhaForNextSentencePrediction(config=_snake_case )
model.to(_snake_case )
model.eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =NezhaForPreTraining(config=_snake_case )
model.to(_snake_case )
model.eval()
SCREAMING_SNAKE_CASE_: Optional[Any] =model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , next_sentence_label=_snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =NezhaForQuestionAnswering(config=_snake_case )
model.to(_snake_case )
model.eval()
SCREAMING_SNAKE_CASE_: Dict =model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , start_positions=_snake_case , end_positions=_snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self.num_labels
SCREAMING_SNAKE_CASE_: str =NezhaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.num_labels
SCREAMING_SNAKE_CASE_: Optional[Any] =NezhaForTokenClassification(config=_snake_case )
model.to(_snake_case )
model.eval()
SCREAMING_SNAKE_CASE_: List[Any] =model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : int , lowerCAmelCase : int , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.num_choices
SCREAMING_SNAKE_CASE_: str =NezhaForMultipleChoice(config=_snake_case )
model.to(_snake_case )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE_: Union[str, Any] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE_: Optional[Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE_: Dict =model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =self.prepare_config_and_inputs()
        ((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): Dict =config_and_inputs
SCREAMING_SNAKE_CASE_: Union[str, Any] ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
UpperCamelCase : str = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase : str = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase : int = True
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : str=False ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
if return_labels:
if model_class in get_values(_snake_case ):
SCREAMING_SNAKE_CASE_: int =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_snake_case )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
return inputs_dict
def lowerCamelCase__ ( self : Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =NezhaModelTester(self )
SCREAMING_SNAKE_CASE_: Any =ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def lowerCamelCase__ ( self : List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_snake_case )
def lowerCamelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
        ((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): Tuple =self.model_tester.prepare_config_and_inputs_for_decoder()
SCREAMING_SNAKE_CASE_: Any =None
self.model_tester.create_and_check_model_as_decoder(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , )
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_snake_case )
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*_snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_snake_case )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
@slow
def lowerCamelCase__ ( self : str ) -> Tuple:
'''simple docstring'''
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[int] =NezhaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@slow
@require_torch_gpu
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments,
            # so skip it and continue with the remaining model classes.
            if model_class == NezhaForMultipleChoice:
                continue
SCREAMING_SNAKE_CASE_: List[Any] =True
SCREAMING_SNAKE_CASE_: int =model_class(config=_snake_case )
SCREAMING_SNAKE_CASE_: Union[str, Any] =self._prepare_for_class(_snake_case , _snake_case )
SCREAMING_SNAKE_CASE_: int =torch.jit.trace(
_snake_case , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_snake_case , os.path.join(_snake_case , """bert.pt""" ) )
SCREAMING_SNAKE_CASE_: str =torch.jit.load(os.path.join(_snake_case , """bert.pt""" ) , map_location=_snake_case )
loaded(inputs_dict["""input_ids"""].to(_snake_case ) , inputs_dict["""attention_mask"""].to(_snake_case ) )
@require_torch
class a ( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
SCREAMING_SNAKE_CASE_: int =torch.tensor([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_: List[Any] =torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE_: str =model(_snake_case , attention_mask=_snake_case )[0]
SCREAMING_SNAKE_CASE_: List[str] =torch.Size((1, 6, 768) )
self.assertEqual(output.shape , _snake_case )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _snake_case , atol=1E-4 ) )
@slow
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
SCREAMING_SNAKE_CASE_: str =torch.tensor([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_: str =torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[Any] =model(_snake_case , attention_mask=_snake_case )[0]
SCREAMING_SNAKE_CASE_: List[Any] =torch.Size((1, 6, 2_1128) )
self.assertEqual(output.shape , _snake_case )
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _snake_case , atol=1E-4 ) )
| 707
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Any =jax.device_count()
SCREAMING_SNAKE_CASE_: Dict =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Dict =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int ="""stabilityai/stable-diffusion-2"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Optional[int] =scheduler_params
SCREAMING_SNAKE_CASE_: Tuple ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.device_count()
SCREAMING_SNAKE_CASE_: Optional[Any] =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Any =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: str =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Any =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
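# Hedged illustration (added): the data-parallel plumbing used above, in shape
# terms. This simplifies flax.jax_utils.replicate / flax.training.common_utils.shard
# rather than reproducing them.
if is_flax_available():
    def _toy_shard(batch):
        # (num_devices * per_device_batch, ...) -> (num_devices, per_device_batch, ...)
        num_devices = jax.device_count()
        return batch.reshape((num_devices, -1) + batch.shape[1:])
    # replicate(params) likewise copies the parameter pytree once per device, so the
    # pmapped pipeline call sees one leading device axis on both params and inputs.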
| 36
| 0
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=lowercase , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=lowercase , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=lowercase )
return parser.parse_args()
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: int =parse_args()
# Import training_script as a module.
SCREAMING_SNAKE_CASE_: Optional[int] =Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =script_fpath.stem
SCREAMING_SNAKE_CASE_: Optional[Any] =importlib.import_module(lowercase )
# Patch sys.argv
SCREAMING_SNAKE_CASE_: Any =[args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
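# Hedged usage example (added): how a launcher like this is typically invoked; the
# file name and the training flags are placeholders.
#
#     python xla_spawn.py --num_cores 8 path/to/train_script.py --lr 3e-5 --epochs 3
#
# The launcher imports the training script as a module, appends "--tpu_num_cores 8"
# to the argv the script sees, and hands the script's _mp_fn entry point to
# xmp.spawn, which forks one process per TPU core.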
| 708
|
"""simple docstring"""
def __magic_name__ ( lowercase = 200_0000 ):
SCREAMING_SNAKE_CASE_: List[Any] =[0 for i in range(n + 1 )]
SCREAMING_SNAKE_CASE_: Union[str, Any] =1
SCREAMING_SNAKE_CASE_: Optional[Any] =1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =1
SCREAMING_SNAKE_CASE_: Dict =0
for i in range(lowercase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 0
|
"""simple docstring"""
from math import sqrt
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =0
for i in range(1 , int(sqrt(snake_case_ ) + 1 ) ):
if n % i == 0 and i != sqrt(snake_case_ ):
total += i + n // i
elif i == sqrt(snake_case_ ):
total += i
return total - n
def __magic_name__ ( lowercase = 1_0000 ):
SCREAMING_SNAKE_CASE_: Optional[Any] =sum(
i
for i in range(1 , snake_case_ )
if sum_of_divisors(sum_of_divisors(snake_case_ ) ) == i and sum_of_divisors(snake_case_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
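# Hedged illustration (added): the classic amicable pair this search finds.
# d(220) = 284 and d(284) = 220, so both numbers satisfy d(d(i)) == i; the
# d(i) != i clause excludes perfect numbers such as 6 and 28.
def _aliquot(n):
    return sum(i for i in range(1, n) if n % i == 0)  # sum of proper divisors

assert _aliquot(220) == 284 and _aliquot(284) == 220
assert _aliquot(6) == 6  # perfect number: filtered out by the d(i) != i check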
| 709
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
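# Hedged note (added): the loop above implements a fixed 12 -> 6 layer
# teacher-to-student mapping -- student layer std_idx copies teacher layer
# teacher_idx:
#
#     student  0  1  2  3  4   5
#     teacher  0  2  4  7  9  11
#
# Loading the resulting checkpoint into a student (e.g. DistilBertForMaskedLM via
# load_state_dict) is assumed to happen in a separate training script; it is not
# shown here.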
| 36
| 0
|
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument("--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model")
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
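# Illustrative invocation (hypothetical script/file names; the flags are the ones
# registered in main() above):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt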
| 710
|
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 36
| 0
|
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class a ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_map(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 711
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
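# Illustrative invocation (hypothetical paths; the flags are the ones registered above):
#   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan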
| 36
| 0
|
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
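# Quick sanity check (illustrative values, assuming the helper names above):
# with number = 0b1010, set_bit(number, 0) == 0b1011, clear_bit(number, 1) == 0b1000,
# flip_bit(number, 2) == 0b1110, is_bit_set(number, 3) is True and get_bit(number, 0) == 0.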
| 712
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # the fused qkv matrix is split into query/key/value tensors
            # (target key layout reconstructed to follow the HF ViT/ViTMAE attention naming)
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
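# Illustrative invocation (hypothetical script name; the default --checkpoint_url above
# points at the base-size MAE checkpoint):
#   python convert_vit_mae_to_pytorch.py --pytorch_dump_folder_path ./vit-mae-base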
| 36
| 0
|
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
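# Sketch of the early-exit control flow above (informal summary, not extra API):
# during inference DeeBertModel raises HighwayException from an intermediate layer once
# that layer's highway classifier is confident enough; the except branch then reuses
# that layer's logits, and exit_layer records where the model stopped.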
| 713
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
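# Note (illustrative): with this lazy-module pattern the heavy torch/TF imports only
# run when an exported symbol is first accessed, e.g.
#   from transformers.models.xlm import XLMConfig  # cheap: only the config module loads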
| 36
| 0
|
"""simple docstring"""
__all__ = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 714
|
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until no swaps occur in a full pass, keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
    input_list = [int(x) for x in input().split()]
# inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
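# Non-interactive example (assumes the function above): odd_even_sort([5, 3, 1, 4, 2])
# returns [1, 2, 3, 4, 5]; the sort works in place, so the argument list is mutated too.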
| 36
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class a ( ImageProcessingSavingTestMixin , unittest.TestCase ):
UpperCamelCase : Union[str, Any] = BeitImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)
@property
    def image_processor_dict(self):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)
def lowerCamelCase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Tuple ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_: Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE_: Optional[int] =image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_: Optional[int] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_: Optional[int] =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE_: Dict =image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_: Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_: Dict =image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase__ ( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_: Optional[int] =prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_: Optional[int] =[]
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
SCREAMING_SNAKE_CASE_: List[Any] =image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched
SCREAMING_SNAKE_CASE_: str =image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test not batched input (PIL images)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE_: Any =image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
# Test batched input (PIL images)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =prepare_semantic_batch_inputs()
SCREAMING_SNAKE_CASE_: Any =image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE_: Dict =image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 150 )
SCREAMING_SNAKE_CASE_: Optional[int] =True
SCREAMING_SNAKE_CASE_: str =image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 255 )
| 715
|
"""simple docstring"""
def is_palindrome(number: int) -> bool:
    return str(number) == str(number)[::-1]


def sum_reverse(number: int) -> int:
    return int(number) + int(str(number)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        current = num
        while iterations < 50:
            current = sum_reverse(current)
            iterations += 1
            if is_palindrome(current):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 0
|
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(
        self,
        images=None,
        text=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_token_type_ids=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
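# Minimal usage sketch (hypothetical checkpoint id, not part of the original module):
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")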
| 716
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""DPTFeatureExtractor"""]
_UpperCAmelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_dpt"""] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
| 0
|
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
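# Worked example (illustrative numbers): for a 480x640 input with output_size=384,
# keep_aspect_ratio=False and multiple=32, scale_height = 384/480 and scale_width = 384/640;
# each target side is then snapped to the nearest multiple of 32, giving (384, 384).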
class a ( UpperCAmelCase__ ):
UpperCamelCase : List[Any] = ['pixel_values']
def __init__( self : int , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase : bool = False , lowerCAmelCase : int = 1 , lowerCAmelCase : bool = True , lowerCAmelCase : Union[int, float] = 1 / 255 , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , **lowerCAmelCase : Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**__A )
SCREAMING_SNAKE_CASE_: Tuple =size if size is not None else {"""height""": 384, """width""": 384}
SCREAMING_SNAKE_CASE_: int =get_size_dict(__A )
SCREAMING_SNAKE_CASE_: Union[str, Any] =do_resize
SCREAMING_SNAKE_CASE_: int =size
SCREAMING_SNAKE_CASE_: Optional[Any] =keep_aspect_ratio
SCREAMING_SNAKE_CASE_: Union[str, Any] =ensure_multiple_of
SCREAMING_SNAKE_CASE_: List[Any] =resample
SCREAMING_SNAKE_CASE_: Optional[int] =do_rescale
SCREAMING_SNAKE_CASE_: Dict =rescale_factor
SCREAMING_SNAKE_CASE_: int =do_normalize
SCREAMING_SNAKE_CASE_: Dict =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE_: List[Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : bool = False , lowerCAmelCase : int = 1 , lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Union[str, Any] , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_resize_output_image_size(
__A , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=__A , multiple=__A , )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[int, float] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : List[str] , ) -> Optional[int]:
'''simple docstring'''
return rescale(__A , scale=__A , data_format=__A , **__A )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : ImageInput , lowerCAmelCase : bool = None , lowerCAmelCase : int = None , lowerCAmelCase : bool = None , lowerCAmelCase : int = None , lowerCAmelCase : PILImageResampling = None , lowerCAmelCase : bool = None , lowerCAmelCase : float = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase : Tuple , ) -> PIL.Image.Image:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_: Any =size if size is not None else self.size
SCREAMING_SNAKE_CASE_: List[str] =get_size_dict(__A )
SCREAMING_SNAKE_CASE_: int =keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE_: Dict =ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE_: Union[str, Any] =resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_: Optional[Any] =do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_: List[str] =rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_: Tuple =do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_: List[Any] =image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_: Any =image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_: Tuple =make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_: Dict =[to_numpy_array(__A ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_: Optional[Any] =[self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_: Optional[Any] =[self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_: Tuple =[self.normalize(image=__A , mean=__A , std=__A ) for image in images]
SCREAMING_SNAKE_CASE_: Optional[Any] =[to_channel_dimension_format(__A , __A ) for image in images]
SCREAMING_SNAKE_CASE_: Tuple ={"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 717
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    # rotate clockwise around the left child, which becomes the new subtree root
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    # rotate counter-clockwise around the right child
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
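# The four classic AVL rebalancing cases map onto the helpers above (illustrative):
#   left-left -> right_rotation, left-right -> lr_rotation,
#   right-right -> left_rotation, right-left -> rl_rotation.
# e.g. inserting 3, 2, 1 in that order makes the root left-left heavy, which
# insert_node below repairs with a single right_rotation.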
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    root.set_height(my_max(get_height(root.get_right()), get_height(root.get_left())) + 1)
    return root
class a :
def __init__( self : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: MyNode | None =None
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
return get_height(self.root )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""insert:""" + str(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Tuple =insert_node(self.root , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""delete:""" + str(lowerCAmelCase ) )
if self.root is None:
print("""Tree is empty!""" )
return
SCREAMING_SNAKE_CASE_: Union[str, Any] =del_node(self.root , lowerCAmelCase )
def __str__( self : List[str] , ) -> str: # a level-order traversal gives a more intuitive view of the tree
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =""""""
SCREAMING_SNAKE_CASE_: str =MyQueue()
q.push(self.root )
SCREAMING_SNAKE_CASE_: List[str] =self.get_height()
if layer == 0:
return output
SCREAMING_SNAKE_CASE_: int =0
while not q.is_empty():
SCREAMING_SNAKE_CASE_: int =q.pop()
SCREAMING_SNAKE_CASE_: List[Any] =""" """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(None )
q.push(None )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
SCREAMING_SNAKE_CASE_: List[Any] =cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , i ) - 1:
SCREAMING_SNAKE_CASE_: int =layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __magic_name__ ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
_UpperCAmelCase = AVLtree()
_UpperCAmelCase = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 36
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class a :
UpperCamelCase : Union[str, Any] = 4_2
UpperCamelCase : Optional[int] = None
UpperCamelCase : List[str] = None
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Optional[Any] =Node(1 )
SCREAMING_SNAKE_CASE_: List[str] =Node(2 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =Node(3 )
SCREAMING_SNAKE_CASE_: Tuple =Node(4 )
SCREAMING_SNAKE_CASE_: int =Node(5 )
return tree
def __magic_name__ ( lowercase ):
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def __magic_name__ ( lowercase ):
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def __magic_name__ ( lowercase ):
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def __magic_name__ ( lowercase ):
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
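# By this definition an empty tree has height 0 and a single node has height 1.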
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: list[Any] =[]
if root is None:
return output
SCREAMING_SNAKE_CASE_: Tuple =deque([root] )
while process_queue:
SCREAMING_SNAKE_CASE_: Optional[Any] =process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: list[Any] =[]
def populate_output(lowercase , lowercase ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(_lowerCamelCase , _lowerCamelCase )
return output
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: list[Any] =[]
def populate_output(lowercase , lowercase ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(_lowerCamelCase , _lowerCamelCase )
return output
def __magic_name__ ( lowercase ):
if root is None:
return []
SCREAMING_SNAKE_CASE_: list[Sequence[Node | None]] =[]
SCREAMING_SNAKE_CASE_: List[str] =0
SCREAMING_SNAKE_CASE_: Optional[Any] =height(_lowerCamelCase )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE_: Any =1
else:
output.append(get_nodes_from_right_to_left(_lowerCamelCase , _lowerCamelCase ) )
SCREAMING_SNAKE_CASE_: Optional[Any] =0
return output
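# Example: for the tree built by make_tree() (1 at the root, children 2 and 3,
# with 4 and 5 under 2), level_order gives [1, 2, 3, 4, 5] and zigzag gives
# [[1], [3, 2], [4, 5]].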
def __magic_name__ ( ): # Main function for testing.
SCREAMING_SNAKE_CASE_: Union[str, Any] =make_tree()
print(f'''In-order Traversal: {inorder(_lowerCamelCase )}''' )
print(f'''Pre-order Traversal: {preorder(_lowerCamelCase )}''' )
print(f'''Post-order Traversal: {postorder(_lowerCamelCase )}''' , """\n""" )
print(f'''Height of Tree: {height(_lowerCamelCase )}''' , """\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(_lowerCamelCase ) , """\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 , height(_lowerCamelCase ) + 1 ):
print(f'''Level {level}:''' , get_nodes_from_left_to_right(_lowerCamelCase , level=_lowerCamelCase ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(_lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 718
|
"""simple docstring"""
import string
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =""""""
for i in sequence:
SCREAMING_SNAKE_CASE_: List[Any] =ord(i )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =string.ascii_letters
SCREAMING_SNAKE_CASE_: Tuple =string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(c )] if c in letters else c for c in sequence )
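# Both implementations map A<->Z, B<->Y, and so on; for example,
# atbash("ABC") == "ZYX" and atbash("abc") == "zyx", while any character
# outside the alphabet passes through unchanged.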
def __magic_name__ ( ):
from timeit import timeit
print("""Running performance benchmarks...""" )
SCREAMING_SNAKE_CASE_: int ="""from string import printable ; from __main__ import atbash, atbash_slow"""
print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowercase )} seconds''' )
print(f'''> atbash(): {timeit("atbash(printable)" , setup=lowercase )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 36
| 0
|
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __magic_name__ ( lowercase ):
for param in module.parameters():
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Tuple ="""cuda""" if torch.cuda.is_available() else """cpu"""
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
SCREAMING_SNAKE_CASE_: Any ="""mps"""
if device == "mps":
print(
"""WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
""" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
""" with generations.""" )
return device
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =plt.imshow(lowercase )
fig.axes.get_xaxis().set_visible(False )
fig.axes.get_yaxis().set_visible(False )
plt.show()
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Optional[int] =datetime.now()
SCREAMING_SNAKE_CASE_: Union[str, Any] =current_time.strftime("""%H:%M:%S""" )
return timestamp
| 719
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=3 , lowerCAmelCase : Optional[Any]=64 , lowerCAmelCase : Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =np.random.default_rng(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =length
SCREAMING_SNAKE_CASE_: Union[str, Any] =rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_: Tuple =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ) -> str:
'''simple docstring'''
return self.length
def __getitem__( self : Union[str, Any] , lowerCAmelCase : Any ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : str=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Optional[int]=False ) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: Dict =True
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tuple=None ) -> int:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
def __init__( self : Union[str, Any] , lowerCAmelCase : Any=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : List[Any]=False ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[str] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: List[Any] =True
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : int=None ) -> Any:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Optional[int] =False
return x * self.a + self.b
def __magic_name__ ( lowercase , lowercase = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE_: Optional[int] ={"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
SCREAMING_SNAKE_CASE_: Any =load_dataset("""csv""" , data_files=lowercase )
SCREAMING_SNAKE_CASE_: Any =datasets["""train"""].unique("""label""" )
SCREAMING_SNAKE_CASE_: List[Any] ={v: i for i, v in enumerate(lowercase )}
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Dict =tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase , padding="""max_length""" )
if "label" in examples:
SCREAMING_SNAKE_CASE_: Optional[int] =[label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_: List[Any] =datasets.map(
lowercase , batched=lowercase , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(tokenized_datasets["""train"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=2 )
SCREAMING_SNAKE_CASE_: Dict =DataLoader(tokenized_datasets["""validation"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=1 )
return train_dataloader, eval_dataloader
| 36
| 0
|
"""simple docstring"""
from collections.abc import Callable
class a :
def __init__( self : int , lowerCAmelCase : Optional[int] = None ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
# Stores indexes of each item for supporting updates and deletion.
SCREAMING_SNAKE_CASE_: List[Any] ={}
# Stores current size of heap.
SCREAMING_SNAKE_CASE_: str =0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
SCREAMING_SNAKE_CASE_: Dict =key or (lambda x : x)
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =int(2 * i + 1 )
return left if 0 < left < self.size else None
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Union[str, Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =int(2 * i + 2 )
return right if 0 < right < self.size else None
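# With 0-based indexing the children of index i sit at 2*i + 1 and 2*i + 2,
# and the parent of i is (i - 1) // 2.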
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =(
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =self.arr[j], self.arr[i]
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self._left(_a )
SCREAMING_SNAKE_CASE_: List[Any] =self._right(_a )
SCREAMING_SNAKE_CASE_: Optional[Any] =i
if left is not None and not self._cmp(_a , _a ):
SCREAMING_SNAKE_CASE_: List[str] =left
if right is not None and not self._cmp(_a , _a ):
SCREAMING_SNAKE_CASE_: Any =right
return valid_parent
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self._parent(_a )
while parent is not None and not self._cmp(_a , _a ):
self._swap(_a , _a )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =parent, self._parent(_a )
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : List[str] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self._get_valid_parent(_a )
while valid_parent != index:
self._swap(_a , _a )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =valid_parent, self._get_valid_parent(_a )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
if item not in self.pos_map:
return
SCREAMING_SNAKE_CASE_: List[Any] =self.pos_map[item]
SCREAMING_SNAKE_CASE_: int =[item, self.key(_a )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(_a )
self._heapify_down(_a )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : List[str] ) -> int:
'''simple docstring'''
if item not in self.pos_map:
return
SCREAMING_SNAKE_CASE_: Tuple =self.pos_map[item]
del self.pos_map[item]
SCREAMING_SNAKE_CASE_: Optional[int] =self.arr[self.size - 1]
SCREAMING_SNAKE_CASE_: Any =index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(_a )
self._heapify_down(_a )
def lowerCamelCase__ ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(_a )] )
else:
SCREAMING_SNAKE_CASE_: Tuple =[item, self.key(_a )]
SCREAMING_SNAKE_CASE_: str =self.size
self.size += 1
self._heapify_up(self.size - 1 )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
return self.arr[0] if self.size else None
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
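# Intended usage (a sketch; the identity key yields a min-heap, while e.g.
# key=lambda v: -v yields a max-heap): after insert_item("a", 3) and
# insert_item("b", 1), get_top() should return ["b", 1].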
def __magic_name__ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720
|
"""simple docstring"""
def __magic_name__ ( lowercase ):
if upper_limit < 0:
raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
SCREAMING_SNAKE_CASE_: Tuple =[0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
SCREAMING_SNAKE_CASE_: Any =1
if upper_limit > 0:
SCREAMING_SNAKE_CASE_: List[str] =1
# Recurrence relation: C(i) = sum(C(j) * C(i-j-1)) for j = 0 .. i-1
for i in range(2 , upper_limit + 1 ):
for j in range(i ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
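# Example: catalan_numbers(5) -> [1, 1, 2, 5, 14, 42]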
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
_UpperCAmelCase = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 36
| 0
|
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
SCREAMING_SNAKE_CASE_: int =FlaxDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=UpperCamelCase__ , cache_dir=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: Any =[t[-1] for t in os.walk(os.path.join(UpperCamelCase__ , os.listdir(UpperCamelCase__ )[0] , """snapshots""" ) )]
SCREAMING_SNAKE_CASE_: List[str] =[item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith(""".bin""" ) for f in files )
@slow
@require_flax
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =FlaxStableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: int =(
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
SCREAMING_SNAKE_CASE_: Optional[Any] =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: List[str] =4
SCREAMING_SNAKE_CASE_: Optional[int] =jax.device_count()
SCREAMING_SNAKE_CASE_: int =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: Dict =pipeline.prepare_inputs(UpperCamelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE_: Any =replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: str =jax.random.split(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: str =shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: str =pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1E-3
assert np.abs(np.abs(UpperCamelCase__ , dtype=np.floataa ).sum() - 4_9947.875 ) < 5E-1
SCREAMING_SNAKE_CASE_: Optional[int] =pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(UpperCamelCase__ ) == num_samples
def lowerCamelCase__ ( self : Dict ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""flax""" , safety_checker=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: Any =(
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
SCREAMING_SNAKE_CASE_: str =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: List[Any] =50
SCREAMING_SNAKE_CASE_: Tuple =jax.device_count()
SCREAMING_SNAKE_CASE_: str =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: Optional[Any] =pipeline.prepare_inputs(UpperCamelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: Dict =jax.random.split(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: Dict =shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: Optional[Any] =pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase__ , dtype=np.floataa ).sum() - 238_3808.2) ) < 5E-1
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: List[str] =(
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
SCREAMING_SNAKE_CASE_: Optional[Any] =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: List[str] =50
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.device_count()
SCREAMING_SNAKE_CASE_: Dict =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: Dict =pipeline.prepare_inputs(UpperCamelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE_: Optional[int] =replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: str =jax.random.split(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: List[str] =shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: Any =pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase__ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def lowerCamelCase__ ( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE_: List[str] =(
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
SCREAMING_SNAKE_CASE_: Tuple =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Optional[Any] =50
SCREAMING_SNAKE_CASE_: List[str] =jax.device_count()
SCREAMING_SNAKE_CASE_: Dict =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: Optional[int] =pipeline.prepare_inputs(UpperCamelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE_: List[Any] =replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.random.split(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: Optional[Any] =shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: Optional[Any] =pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase__ , dtype=np.floataa ).sum() - 237_3516.75) ) < 5E-1
def lowerCamelCase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , set_alpha_to_one=UpperCamelCase__ , steps_offset=1 , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE_: int =scheduler.create_state()
SCREAMING_SNAKE_CASE_: Dict =scheduler_state
SCREAMING_SNAKE_CASE_: int =(
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
SCREAMING_SNAKE_CASE_: int =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Tuple =50
SCREAMING_SNAKE_CASE_: int =jax.device_count()
SCREAMING_SNAKE_CASE_: Any =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: int =pipeline.prepare_inputs(UpperCamelCase__ )
# shard inputs and rng
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: Tuple =jax.random.split(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: int =shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: Dict =pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1E-3
assert np.abs((np.abs(UpperCamelCase__ , dtype=np.floataa ).sum() - 234_7693.5) ) < 5E-1
def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =(
"""A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"""
""" field, close up, split lighting, cinematic"""
)
SCREAMING_SNAKE_CASE_: List[Any] =jax.device_count()
SCREAMING_SNAKE_CASE_: Optional[Any] =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: str =jax.random.split(jax.random.PRNGKey(0 ) , UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: int =pipeline.prepare_inputs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: str =shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: Any =pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE_: str =images[2, 0, 256, 10:17, 1]
# With memory efficient attention
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=UpperCamelCase__ , use_memory_efficient_attention=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE_: Any =replicate(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =pipeline.prepare_inputs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: Dict =shard(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_: List[str] =pipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , jit=UpperCamelCase__ ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE_: Tuple =images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1E-2
| 721
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_UpperCAmelCase = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'albert'
def __init__( self : Dict , lowerCAmelCase : List[str]=3_0000 , lowerCAmelCase : List[Any]=128 , lowerCAmelCase : List[str]=4096 , lowerCAmelCase : str=12 , lowerCAmelCase : str=1 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Dict=1_6384 , lowerCAmelCase : int=1 , lowerCAmelCase : str="gelu_new" , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : List[Any]="absolute" , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[int]=3 , **lowerCAmelCase : int , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =embedding_size
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_hidden_groups
SCREAMING_SNAKE_CASE_: List[Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: List[Any] =inner_group_num
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_act
SCREAMING_SNAKE_CASE_: int =intermediate_size
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int =max_position_embeddings
SCREAMING_SNAKE_CASE_: Any =type_vocab_size
SCREAMING_SNAKE_CASE_: int =initializer_range
SCREAMING_SNAKE_CASE_: List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE_: Dict =classifier_dropout_prob
SCREAMING_SNAKE_CASE_: int =position_embedding_type
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 36
| 0
|
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class a ( unittest.TestCase ):
@parameterized.expand([(None,), ("""foo.json""",)] )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =GenerationConfig(
do_sample=lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase , config_name=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =GenerationConfig.from_pretrained(lowerCAmelCase , config_name=lowerCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , lowerCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , lowerCAmelCase )
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =AutoConfig.from_pretrained("""gpt2""" )
SCREAMING_SNAKE_CASE_: Union[str, Any] =GenerationConfig.from_model_config(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(lowerCAmelCase , lowerCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =GenerationConfig()
SCREAMING_SNAKE_CASE_: List[Any] ={
"""max_new_tokens""": 1024,
"""foo""": """bar""",
}
SCREAMING_SNAKE_CASE_: List[str] =copy.deepcopy(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =generation_config.update(**lowerCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(lowerCAmelCase , {"""foo""": """bar"""} )
def lowerCamelCase__ ( self : List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =GenerationConfig()
SCREAMING_SNAKE_CASE_: Any ="""bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =GenerationConfig.from_pretrained(lowerCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
SCREAMING_SNAKE_CASE_: Union[str, Any] =GenerationConfig.from_model_config(lowerCAmelCase )
assert not hasattr(lowerCAmelCase , """foo""" ) # no new kwargs should be initialized if from config
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , lowerCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
SCREAMING_SNAKE_CASE_: Tuple =GenerationConfig(
do_sample=lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , lowerCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =GenerationConfig.from_pretrained(lowerCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , lowerCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class a ( unittest.TestCase ):
@classmethod
def lowerCamelCase__ ( cls : int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =TOKEN
HfFolder.save_token(lowerCAmelCase )
@classmethod
def lowerCamelCase__ ( cls : Tuple ) -> Optional[int]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def lowerCamelCase__ ( self : int ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =GenerationConfig(
do_sample=lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_: List[Any] =GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase , repo_id="""test-generation-config""" , push_to_hub=lowerCAmelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_: str =GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =GenerationConfig(
do_sample=lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_: Tuple =GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCAmelCase , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowerCAmelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_: Any =GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCAmelCase , getattr(lowerCAmelCase , lowerCAmelCase ) )
| 700
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a ( yaml.SafeLoader ):
def lowerCamelCase__ ( self : int , lowerCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =[self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_: Any =[tuple(lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else key for key in keys]
SCREAMING_SNAKE_CASE_: Dict =Counter(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =[key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=False ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =super().construct_mapping(lowerCAmelCase , deep=lowerCAmelCase )
self._check_no_duplicates_on_constructed_node(lowerCAmelCase )
return mapping
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE_: Union[str, Any] =full_content[1:].index("""---""" ) + 1
SCREAMING_SNAKE_CASE_: List[str] ="""\n""".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowercase )
class a ( UpperCAmelCase__ ):
# class attributes
UpperCamelCase : Tuple = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def lowerCamelCase__ ( cls : List[Any] , lowerCAmelCase : Path ) -> "DatasetMetadata":
'''simple docstring'''
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =_split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(lowerCAmelCase )
else:
return cls()
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Path ) -> List[str]:
'''simple docstring'''
if path.exists():
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_: str =readme_file.read()
else:
SCREAMING_SNAKE_CASE_: str =None
SCREAMING_SNAKE_CASE_: Tuple =self._to_readme(lowerCAmelCase )
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as readme_file:
readme_file.write(lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Optional[str] = None ) -> str:
'''simple docstring'''
if readme_content is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =_split_yaml_from_readme(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] ="""---\n""" + self.to_yaml_string() + """---\n""" + content
else:
SCREAMING_SNAKE_CASE_: List[Any] ="""---\n""" + self.to_yaml_string() + """---\n"""
return full_content
@classmethod
def lowerCamelCase__ ( cls : Optional[int] , lowerCAmelCase : str ) -> "DatasetMetadata":
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =yaml.load(lowerCAmelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_: List[Any] ={
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**lowerCAmelCase )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=lowerCAmelCase , allow_unicode=lowerCAmelCase , encoding="""utf-8""" , ).decode("""utf-8""" )
_UpperCAmelCase = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_UpperCAmelCase = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
_UpperCAmelCase = ap.parse_args()
_UpperCAmelCase = Path(args.readme_filepath)
_UpperCAmelCase = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 36
| 0
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =0
SCREAMING_SNAKE_CASE_: List[str] =[0]
SCREAMING_SNAKE_CASE_: str =[0]
SCREAMING_SNAKE_CASE_: Tuple =len(lowercase__ )
self.assertEqual(k.knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) , 0 )
SCREAMING_SNAKE_CASE_: Tuple =[60]
SCREAMING_SNAKE_CASE_: Union[str, Any] =[10]
SCREAMING_SNAKE_CASE_: List[str] =len(lowercase__ )
self.assertEqual(k.knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) , 0 )
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =3
SCREAMING_SNAKE_CASE_: Optional[Any] =[1, 2, 3]
SCREAMING_SNAKE_CASE_: int =[3, 2, 1]
SCREAMING_SNAKE_CASE_: List[Any] =len(lowercase__ )
self.assertEqual(k.knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) , 5 )
def lowerCamelCase__ ( self : int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =50
SCREAMING_SNAKE_CASE_: List[str] =[60, 100, 120]
SCREAMING_SNAKE_CASE_: Optional[int] =[10, 20, 30]
SCREAMING_SNAKE_CASE_: Dict =len(lowercase__ )
self.assertEqual(k.knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) , 220 )
if __name__ == "__main__":
unittest.main()
| 701
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __magic_name__ ( lowercase ):
return (data["data"], data["target"])
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =XGBClassifier()
classifier.fit(lowercase , lowercase )
return classifier
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Optional[Any] =load_iris()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =data_handling(lowercase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =train_test_split(
lowercase , lowercase , test_size=0.25 )
SCREAMING_SNAKE_CASE_: Tuple =iris["""target_names"""]
# Create an XGBoost Classifier from the training data
SCREAMING_SNAKE_CASE_: Optional[int] =xgboost(lowercase , lowercase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
lowercase , lowercase , lowercase , display_labels=lowercase , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 36
| 0
|
"""simple docstring"""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
_UpperCAmelCase = True
from torch.cuda.amp import autocast
_UpperCAmelCase = logging.getLogger(__name__)
def __magic_name__ ( default=None , metadata=None ):
return field(default_factory=lambda: default , metadata=metadata )
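# Mutable defaults (such as lists) must be supplied to dataclass fields via
# default_factory, hence the lambda wrapper above.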
@dataclass
class a :
UpperCamelCase : Any = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
UpperCamelCase : List[Any] = field(
default=UpperCAmelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
UpperCamelCase : Any = field(
default=UpperCAmelCase__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
UpperCamelCase : Optional[int] = field(
default=0.1 , metadata={'help': 'The dropout ratio for the attention probabilities.'} )
UpperCamelCase : Dict = field(
default=0.1 , metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'} )
UpperCamelCase : Any = field(
default=0.1 , metadata={
'help': 'The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'
} , )
UpperCamelCase : Optional[Any] = field(
default=0.1 , metadata={'help': 'The dropout probability for all 1D convolutional layers in the feature extractor.'} , )
UpperCamelCase : int = field(
default=0.05 , metadata={
'help': (
'Probability of each feature vector along the time axis to be chosen as the start of the vector '
'span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature '
'vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'
)
} , )
UpperCamelCase : Dict = field(default=0.0 , metadata={'help': 'The LayerDrop probability.'} )
@dataclass
class a :
UpperCamelCase : int = field(
default=UpperCAmelCase__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
UpperCamelCase : List[Any] = field(
default='train+validation' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train+validation\''
} , )
UpperCamelCase : List[Any] = field(
default=UpperCAmelCase__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
UpperCamelCase : List[Any] = field(
default=UpperCAmelCase__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
UpperCamelCase : str = field(
default=UpperCAmelCase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
UpperCamelCase : int = field(
default=UpperCAmelCase__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of validation examples to this '
'value if set.'
)
} , )
UpperCamelCase : Any = list_field(
default=[',', '?', '.', '!', '-', ';', ':', '""', '%', '\'', '"', '�'] , metadata={'help': 'A list of characters to remove from the transcripts.'} , )
@dataclass
class a :
UpperCamelCase : Tuple = 4_2
UpperCamelCase : List[Any] = True
UpperCamelCase : List[Any] = None
UpperCamelCase : int = None
UpperCamelCase : Dict = None
UpperCamelCase : Tuple = None
def __call__( self : Tuple , lowerCAmelCase : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =[{'input_values': feature['input_values']} for feature in features]
SCREAMING_SNAKE_CASE_: int =[{'input_ids': feature['labels']} for feature in features]
SCREAMING_SNAKE_CASE_: Dict =self.processor.pad(
lowerCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
SCREAMING_SNAKE_CASE_: Dict =self.processor.pad(
labels=lowerCAmelCase , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors="""pt""" , )
# replace padding with -100 to ignore loss correctly
SCREAMING_SNAKE_CASE_: List[Any] =labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
SCREAMING_SNAKE_CASE_: Any =labels
return batch
class a ( UpperCAmelCase__ ):
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : nn.Module , lowerCAmelCase : Dict[str, Union[torch.Tensor, Any]] ) -> List[str]:
'''simple docstring'''
model.train()
SCREAMING_SNAKE_CASE_: Optional[Any] =self._prepare_inputs(lowerCAmelCase )
if self.use_amp:
with autocast():
SCREAMING_SNAKE_CASE_: Optional[int] =self.compute_loss(lowerCAmelCase , lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_: str =self.compute_loss(lowerCAmelCase , lowerCAmelCase )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
SCREAMING_SNAKE_CASE_: Union[str, Any] =loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
SCREAMING_SNAKE_CASE_: List[str] =loss.sum() / (inputs['labels'] >= 0).sum()
else:
raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
SCREAMING_SNAKE_CASE_: Tuple =loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCAmelCase ).backward()
elif self.use_apex:
with amp.scale_loss(lowerCAmelCase , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCAmelCase )
else:
loss.backward()
return loss.detach()
def __magic_name__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE_: Union[str, Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE_: Optional[Any] =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE_: Optional[Any] =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , SCREAMING_SNAKE_CASE_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
SCREAMING_SNAKE_CASE_: List[Any] =datasets.load_dataset(
"""common_voice""" , data_args.dataset_config_name , split=data_args.train_split_name )
SCREAMING_SNAKE_CASE_: List[Any] =datasets.load_dataset("""common_voice""" , data_args.dataset_config_name , split="""test""" )
# Create and save tokenizer
SCREAMING_SNAKE_CASE_: Dict =f'''[{"".join(data_args.chars_to_ignore )}]'''
def remove_special_characters(lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =re.sub(SCREAMING_SNAKE_CASE_ , """""" , batch["""sentence"""] ).lower() + ' '
return batch
SCREAMING_SNAKE_CASE_: Any =train_dataset.map(SCREAMING_SNAKE_CASE_ , remove_columns=["""sentence"""] )
SCREAMING_SNAKE_CASE_: Any =eval_dataset.map(SCREAMING_SNAKE_CASE_ , remove_columns=["""sentence"""] )
def extract_all_chars(lowercase ):
SCREAMING_SNAKE_CASE_: Any =' '.join(batch["""text"""] )
SCREAMING_SNAKE_CASE_: int =list(set(SCREAMING_SNAKE_CASE_ ) )
return {"vocab": [vocab], "all_text": [all_text]}
SCREAMING_SNAKE_CASE_: List[str] =train_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , batch_size=-1 , keep_in_memory=SCREAMING_SNAKE_CASE_ , remove_columns=train_dataset.column_names , )
SCREAMING_SNAKE_CASE_: str =train_dataset.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , batch_size=-1 , keep_in_memory=SCREAMING_SNAKE_CASE_ , remove_columns=eval_dataset.column_names , )
SCREAMING_SNAKE_CASE_: Optional[Any] =list(set(vocab_train["""vocab"""][0] ) | set(vocab_test["""vocab"""][0] ) )
SCREAMING_SNAKE_CASE_: Tuple ={v: k for k, v in enumerate(SCREAMING_SNAKE_CASE_ )}
SCREAMING_SNAKE_CASE_: Tuple =vocab_dict[""" """]
del vocab_dict[""" """]
SCREAMING_SNAKE_CASE_: Optional[Any] =len(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_: Optional[Any] =len(SCREAMING_SNAKE_CASE_ )
with open("""vocab.json""" , """w""" ) as vocab_file:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
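# Sketch (assumption, not from this script) of the vocabulary construction above in
# isolation: collect the character set of the corpus, map each character to an id,
# and replace the space with the CTC word delimiter before appending special tokens.
#
#   chars = sorted(set(" ".join(["hello world", "abc"])))
#   vocab = {c: i for i, c in enumerate(chars)}
#   vocab["|"] = vocab.pop(" ")                      # word delimiter replaces the space
#   vocab["[UNK]"], vocab["[PAD]"] = len(vocab), len(vocab) + 1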
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE_: str =WavaVecaCTCTokenizer(
"""vocab.json""" , unk_token="""[UNK]""" , pad_token="""[PAD]""" , word_delimiter_token="""|""" , )
SCREAMING_SNAKE_CASE_: List[Any] =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0.0 , do_normalize=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_: Optional[Any] =WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_: Dict =WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction="""mean""" , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =min(len(SCREAMING_SNAKE_CASE_ ) , data_args.max_train_samples )
SCREAMING_SNAKE_CASE_: List[str] =train_dataset.select(range(SCREAMING_SNAKE_CASE_ ) )
if data_args.max_val_samples is not None:
SCREAMING_SNAKE_CASE_: List[Any] =eval_dataset.select(range(data_args.max_val_samples ) )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torchaudio.transforms.Resample(4_8000 , 1_6000 )
# Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =torchaudio.load(batch["""path"""] )
SCREAMING_SNAKE_CASE_: Tuple =resampler(SCREAMING_SNAKE_CASE_ ).squeeze().numpy()
SCREAMING_SNAKE_CASE_: Dict =1_6000
SCREAMING_SNAKE_CASE_: int =batch["""text"""]
return batch
SCREAMING_SNAKE_CASE_: Union[str, Any] =train_dataset.map(
SCREAMING_SNAKE_CASE_ , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
SCREAMING_SNAKE_CASE_: int =eval_dataset.map(
SCREAMING_SNAKE_CASE_ , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(lowercase ):
# check that all files have the correct sampling rate
assert (
len(set(batch["""sampling_rate"""] ) ) == 1
), f'''Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}.'''
SCREAMING_SNAKE_CASE_: Tuple =processor(
audio=batch["""speech"""] , text=batch["""target_text"""] , sampling_rate=batch["""sampling_rate"""][0] )
batch.update(SCREAMING_SNAKE_CASE_ )
return batch
SCREAMING_SNAKE_CASE_: List[Any] =train_dataset.map(
SCREAMING_SNAKE_CASE_ , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , )
SCREAMING_SNAKE_CASE_: Tuple =eval_dataset.map(
SCREAMING_SNAKE_CASE_ , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=SCREAMING_SNAKE_CASE_ , num_proc=data_args.preprocessing_num_workers , )
# Metric
SCREAMING_SNAKE_CASE_: List[Any] =datasets.load_metric("""wer""" )
def compute_metrics(lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =pred.predictions
SCREAMING_SNAKE_CASE_: Optional[Any] =np.argmax(SCREAMING_SNAKE_CASE_ , axis=-1 )
SCREAMING_SNAKE_CASE_: str =processor.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE_: Optional[int] =processor.batch_decode(SCREAMING_SNAKE_CASE_ )
# we do not want to group tokens when computing the metrics
SCREAMING_SNAKE_CASE_: Optional[int] =processor.batch_decode(pred.label_ids , group_tokens=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_: Any =wer_metric.compute(predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
SCREAMING_SNAKE_CASE_: Optional[Any] =DataCollatorCTCWithPadding(processor=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ )
# Initialize our Trainer
SCREAMING_SNAKE_CASE_: Dict =CTCTrainer(
model=SCREAMING_SNAKE_CASE_ , data_collator=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , compute_metrics=SCREAMING_SNAKE_CASE_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
SCREAMING_SNAKE_CASE_: Tuple =model_args.model_name_or_path
else:
SCREAMING_SNAKE_CASE_: List[Any] =None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
SCREAMING_SNAKE_CASE_: str =trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE_ )
trainer.save_model()
SCREAMING_SNAKE_CASE_: Optional[int] =train_result.metrics
SCREAMING_SNAKE_CASE_: int =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE_ )
)
SCREAMING_SNAKE_CASE_: List[Any] =min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("""train""" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("""train""" , SCREAMING_SNAKE_CASE_ )
trainer.save_state()
# Evaluation
SCREAMING_SNAKE_CASE_: int ={}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
SCREAMING_SNAKE_CASE_: Any =trainer.evaluate()
SCREAMING_SNAKE_CASE_: Optional[int] =data_args.max_val_samples if data_args.max_val_samples is not None else len(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_: Tuple =min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) )
trainer.log_metrics("""eval""" , SCREAMING_SNAKE_CASE_ )
trainer.save_metrics("""eval""" , SCREAMING_SNAKE_CASE_ )
return results
if __name__ == "__main__":
main()
| 702
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
SCREAMING_SNAKE_CASE_: List[str] =[]
SCREAMING_SNAKE_CASE_: Any =[]
for rt in rc.restypes:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
SCREAMING_SNAKE_CASE_: Any ={name: i for i, name in enumerate(lowercase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor(
lowercase , dtype=torch.floataa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Any =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: Tuple =residx_atomaa_mask
SCREAMING_SNAKE_CASE_: Dict =residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
SCREAMING_SNAKE_CASE_: Dict =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Optional[int] =residx_atomaa_to_atomaa.long()
# create the corresponding mask
SCREAMING_SNAKE_CASE_: Optional[int] =torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
SCREAMING_SNAKE_CASE_: int =rc.restype_atoa[restype_letter]
SCREAMING_SNAKE_CASE_: Any =rc.residue_atoms[restype_name]
for atom_name in atom_names:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.atom_order[atom_name]
SCREAMING_SNAKE_CASE_: Dict =1
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: List[Any] =residx_atomaa_mask
return protein
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =tree_map(lambda lowercase : torch.tensor(lowercase , device=batch["""aatype"""].device ) , lowercase , np.ndarray )
SCREAMING_SNAKE_CASE_: int =tensor_tree_map(lambda lowercase : np.array(lowercase ) , make_atomaa_masks(lowercase ) )
return out
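# Illustrative sketch, not part of the module: the core indexing trick above is a
# per-residue table lookup. Given a [num_restypes, 14] table and a vector of
# residue type indices, table[aatype] broadcasts to a [num_res, 14] tensor.
def _per_residue_lookup_sketch():
    import torch

    table = torch.arange(21 * 14).reshape(21, 14)  # stand-in for restype_atom14_to_atom37
    aatype = torch.tensor([0, 5, 20])  # residue type index per position
    return table[aatype]  # shape [3, 14]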
| 36
| 0
|
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class a :
def __init__( self : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : Dict=13 , lowerCAmelCase : Tuple=30 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Tuple=3 , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : Tuple=32 , lowerCAmelCase : List[str]=2 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Tuple=37 , lowerCAmelCase : Union[str, Any]="gelu" , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : List[Any]=10 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Tuple=3 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=2 , ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =parent
SCREAMING_SNAKE_CASE_: int =batch_size
SCREAMING_SNAKE_CASE_: Optional[int] =image_size
SCREAMING_SNAKE_CASE_: Optional[Any] =patch_size
SCREAMING_SNAKE_CASE_: Optional[int] =num_channels
SCREAMING_SNAKE_CASE_: Dict =is_training
SCREAMING_SNAKE_CASE_: List[Any] =use_labels
SCREAMING_SNAKE_CASE_: List[Any] =hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] =num_hidden_layers
SCREAMING_SNAKE_CASE_: int =num_attention_heads
SCREAMING_SNAKE_CASE_: Tuple =intermediate_size
SCREAMING_SNAKE_CASE_: Union[str, Any] =hidden_act
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Dict =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =type_sequence_label_size
SCREAMING_SNAKE_CASE_: Dict =initializer_range
SCREAMING_SNAKE_CASE_: Optional[int] =scope
SCREAMING_SNAKE_CASE_: Union[str, Any] =encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE_: Tuple =(image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_: Union[str, Any] =num_patches + 2
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: str =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: str =ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_: List[Any] =self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : str ) -> List[str]:
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =TFDeiTModel(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =TFDeiTForMaskedImageModeling(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =model(lowerCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE_: str =1
SCREAMING_SNAKE_CASE_: str =TFDeiTForMaskedImageModeling(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: Any =model(lowerCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =self.type_sequence_label_size
SCREAMING_SNAKE_CASE_: Dict =TFDeiTForImageClassification(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE_: List[str] =1
SCREAMING_SNAKE_CASE_: Union[str, Any] =TFDeiTForImageClassification(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: List[Any] =model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_: Any =config_and_inputs
SCREAMING_SNAKE_CASE_: Dict ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class a ( __A , __A , unittest.TestCase ):
UpperCamelCase : Dict = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
UpperCamelCase : Dict = (
{
'feature-extraction': TFDeiTModel,
'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
UpperCamelCase : Union[str, Any] = False
UpperCamelCase : List[str] = False
UpperCamelCase : int = False
UpperCamelCase : Tuple = False
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =TFDeiTModelTester(self )
SCREAMING_SNAKE_CASE_: Optional[Any] =ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 )
def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Union[str, Any] =model_class(lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
SCREAMING_SNAKE_CASE_: str =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , tf.keras.layers.Dense ) )
def lowerCamelCase__ ( self : Any ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: List[Any] =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: List[Any] =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: List[Any] =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def lowerCamelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Dict=False ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[int] =TFDeiTModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Any =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : Any ) -> Dict:
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ ( self : List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.default_image_processor
SCREAMING_SNAKE_CASE_: List[Any] =prepare_img()
SCREAMING_SNAKE_CASE_: Any =image_processor(images=lowerCAmelCase , return_tensors="""tf""" )
# forward pass
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: str =tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
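# Usage sketch of the same inference flow outside the test harness; the checkpoint
# name matches the test above, everything else is illustrative.
def _deit_inference_sketch(image):
    import tensorflow as tf
    from transformers import DeiTImageProcessor, TFDeiTForImageClassificationWithTeacher

    processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
    model = TFDeiTForImageClassificationWithTeacher.from_pretrained(
        "facebook/deit-base-distilled-patch16-224"
    )
    inputs = processor(images=image, return_tensors="tf")
    logits = model(**inputs).logits  # shape (1, 1000)
    return int(tf.math.argmax(logits, axis=-1)[0])  # predicted ImageNet class id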
| 703
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =[]
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(lowercase , lowercase ):
inputs.append(create_inputs(lowercase ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =[]
for output in outputs:
if isinstance(lowercase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(lowercase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(lowercase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
@is_tool_test
class a :
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] =self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
SCREAMING_SNAKE_CASE_: Any =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: List[Any] =self.tool(*lowerCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
SCREAMING_SNAKE_CASE_: str =[outputs]
self.assertListEqual(output_types(lowerCAmelCase ) , self.tool.outputs )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Tuple =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase , self.tool.outputs ):
SCREAMING_SNAKE_CASE_: int =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for _input, input_type in zip(lowerCAmelCase , self.tool.inputs ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
SCREAMING_SNAKE_CASE_: Dict =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
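# Usage sketch (hypothetical tool, illustration only): the helpers above fabricate
# dummy inputs for a tool's declared modalities and classify the outputs it returns.
#
#   create_inputs(["text", "image"])              # -> ["Text input", <PIL.Image 512x512>]
#   output_types(["hello", torch.ones(3000)])     # -> ["text", "audio"]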
| 36
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a ( __lowercase , __lowercase , __lowercase , unittest.TestCase ):
UpperCamelCase : Any = StableDiffusionInpaintPipeline
UpperCamelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase : Dict = frozenset([] )
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
SCREAMING_SNAKE_CASE_: List[str] =PNDMScheduler(skip_prk_steps=_A )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Optional[int] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Tuple =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
SCREAMING_SNAKE_CASE_: List[str] =CLIPTextModel(_A )
SCREAMING_SNAKE_CASE_: List[Any] =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int]=0 ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
SCREAMING_SNAKE_CASE_: Union[str, Any] =image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE_: int =Image.fromarray(np.uinta(_A ) ).convert("""RGB""" ).resize((64, 64) )
SCREAMING_SNAKE_CASE_: Optional[Any] =Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(_A ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE_: Dict =torch.manual_seed(_A )
else:
SCREAMING_SNAKE_CASE_: str =torch.Generator(device=_A ).manual_seed(_A )
SCREAMING_SNAKE_CASE_: str ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] ="""cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_: List[Any] =self.get_dummy_components()
SCREAMING_SNAKE_CASE_: Dict =StableDiffusionInpaintPipeline(**_A )
SCREAMING_SNAKE_CASE_: Dict =sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
SCREAMING_SNAKE_CASE_: int =self.get_dummy_inputs(_A )
SCREAMING_SNAKE_CASE_: List[str] =sd_pipe(**_A ).images
SCREAMING_SNAKE_CASE_: str =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_: Tuple =np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
SCREAMING_SNAKE_CASE_: Union[str, Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
SCREAMING_SNAKE_CASE_: Dict =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench.npy""" )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""stabilityai/stable-diffusion-2-inpainting"""
SCREAMING_SNAKE_CASE_: Tuple =StableDiffusionInpaintPipeline.from_pretrained(_A , safety_checker=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_: List[Any] ="""Face of a yellow cat, high resolution, sitting on a park bench"""
SCREAMING_SNAKE_CASE_: Dict =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Tuple =pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , output_type="""np""" , )
SCREAMING_SNAKE_CASE_: List[str] =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def lowerCamelCase__ ( self : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
SCREAMING_SNAKE_CASE_: List[str] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
SCREAMING_SNAKE_CASE_: List[str] =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"""
"""/yellow_cat_sitting_on_a_park_bench_fp16.npy""" )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""stabilityai/stable-diffusion-2-inpainting"""
SCREAMING_SNAKE_CASE_: List[str] =StableDiffusionInpaintPipeline.from_pretrained(
_A , torch_dtype=torch.floataa , safety_checker=_A , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_: int ="""Face of a yellow cat, high resolution, sitting on a park bench"""
SCREAMING_SNAKE_CASE_: Dict =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: List[Any] =pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , output_type="""np""" , )
SCREAMING_SNAKE_CASE_: List[str] =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE_: List[str] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-inpaint/init_image.png""" )
SCREAMING_SNAKE_CASE_: Any =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png""" )
SCREAMING_SNAKE_CASE_: Tuple ="""stabilityai/stable-diffusion-2-inpainting"""
SCREAMING_SNAKE_CASE_: List[str] =PNDMScheduler.from_pretrained(_A , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE_: Union[str, Any] =StableDiffusionInpaintPipeline.from_pretrained(
_A , safety_checker=_A , scheduler=_A , torch_dtype=torch.floataa , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE_: Dict ="""Face of a yellow cat, high resolution, sitting on a park bench"""
SCREAMING_SNAKE_CASE_: str =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Optional[int] =pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9
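# Minimal usage sketch of the pipeline exercised above; the checkpoint and prompt
# match the tests, while device and seed handling are illustrative.
def _inpaint_sketch(init_image, mask_image):
    import torch
    from diffusers import StableDiffusionInpaintPipeline

    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
    )
    pipe = pipe.to("cuda")
    generator = torch.manual_seed(0)
    result = pipe(
        prompt="Face of a yellow cat, high resolution, sitting on a park bench",
        image=init_image,
        mask_image=mask_image,
        generator=generator,
        output_type="np",
    )
    return result.images[0]  # (512, 512, 3) numpy array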
| 704
|
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =sorted(numsa + numsa )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =divmod(len(lowercase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of first array: """).split()]
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of second array: """).split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 36
| 0
|
"""simple docstring"""
from typing import Any
def __magic_name__ ( lowercase ):
if not input_list:
return []
SCREAMING_SNAKE_CASE_: Tuple =[input_list.count(lowerCAmelCase__ ) for value in input_list]
SCREAMING_SNAKE_CASE_: Union[str, Any] =max(lowerCAmelCase__ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(lowerCAmelCase__ ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
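# Equivalent sketch using collections.Counter (an illustrative alternative to the
# count-based implementation above; returns all values tied for the highest count).
def _modes_sketch(values):
    from collections import Counter

    if not values:
        return []
    counts = Counter(values)
    top = max(counts.values())
    return sorted(v for v, c in counts.items() if c == top)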
| 705
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str]=13 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=224 , lowerCAmelCase : List[str]=1000 , lowerCAmelCase : Optional[Any]=[3, 3, 6, 4] , lowerCAmelCase : int=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =parent
SCREAMING_SNAKE_CASE_: Any =batch_size
SCREAMING_SNAKE_CASE_: Tuple =num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] =is_training
SCREAMING_SNAKE_CASE_: Tuple =use_labels
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =num_labels
SCREAMING_SNAKE_CASE_: int =image_size
SCREAMING_SNAKE_CASE_: Optional[Any] =layer_depths
SCREAMING_SNAKE_CASE_: List[Any] =embed_dims
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_: Tuple =self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1E-5 , )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Any =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.num_labels
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE_: int =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): str =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_: Tuple ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[int] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCamelCase : Tuple = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Any = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Dict = False
UpperCamelCase : List[str] = False
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Any =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Tuple =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[Any] =SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_: Optional[Any] =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[str] =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Dict =outputs.hidden_states
SCREAMING_SNAKE_CASE_: List[Any] =8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width),
# with the width and height successively divided by 2 after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Any =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
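# Shape note, worked from the check above: with image_size=224 and
# embed_dims=[48, 56, 112, 220], stage i of the 8 hidden states has spatial side
# (224 // 4) // 2 ** (i // 2), i.e. 56, 56, 28, 28, 14, 14, 7, 7 --
# the resolution halves after every two blocks.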
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
def _config_zero_init(lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_: Dict =copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1E-10 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =_config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: List[Any] =_config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: int =prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 36
| 0
|
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Tuple =[False] * len(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_: Any =[-1] * len(lowerCAmelCase_ )
def dfs(lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =True
SCREAMING_SNAKE_CASE_: Any =c
for u in graph[v]:
if not visited[u]:
dfs(lowerCAmelCase_ , 1 - c )
for i in range(len(lowerCAmelCase_ ) ):
if not visited[i]:
dfs(lowerCAmelCase_ , 0 )
for i in range(len(lowerCAmelCase_ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_UpperCAmelCase = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
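# Alternative sketch: the same 2-colorability check with an iterative BFS, which
# avoids recursion-depth limits on long paths (same adjacency-list format as above).
def _check_bipartite_bfs(graph):
    from collections import deque

    color = {}
    for start in graph:
        if start in color:
            continue
        color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in graph[v]:
                if u not in color:
                    color[u] = 1 - color[v]  # assign the opposite color
                    queue.append(u)
                elif color[u] == color[v]:
                    return False  # two adjacent vertices share a color
    return True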
| 706
|
"""simple docstring"""
from math import pi
def __magic_name__ ( lowercase , lowercase ):
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
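# Worked check: a 90 degree angle sweeps a quarter of the circumference, so
# arc_length(90, 10) == 2 * pi * 10 * (90 / 360) == 5 * pi ≈ 15.708.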
| 36
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
_UpperCAmelCase = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class a ( UpperCAmelCase__ ):
UpperCamelCase : str = 4_2
class a ( UpperCAmelCase__ ):
def __init__( self : List[Any] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , ) -> Optional[int]:
'''simple docstring'''
super().__init__()
self.register_modules(
prior=lowerCAmelCase , image_encoder=lowerCAmelCase , image_processor=lowerCAmelCase , scheduler=lowerCAmelCase , renderer=lowerCAmelCase , )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : int ) -> Tuple:
'''simple docstring'''
if latents is None:
SCREAMING_SNAKE_CASE_: Any =randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=lowerCAmelCase , dtype=lowerCAmelCase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
SCREAMING_SNAKE_CASE_: Any =latents.to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =latents * scheduler.init_noise_sigma
return latents
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Dict=0 ) -> Optional[Any]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
SCREAMING_SNAKE_CASE_: int =torch.device(f'''cuda:{gpu_id}''' )
SCREAMING_SNAKE_CASE_: List[Any] =[self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase , lowerCAmelCase )
@property
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase , lowerCAmelCase ) and isinstance(image[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE_: Any =torch.cat(lowerCAmelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(lowerCAmelCase , axis=0 )
if not isinstance(lowerCAmelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE_: List[str] =self.image_processor(lowerCAmelCase , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
SCREAMING_SNAKE_CASE_: Tuple =image.to(dtype=self.image_encoder.dtype , device=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =self.image_encoder(lowerCAmelCase )["""last_hidden_state"""]
SCREAMING_SNAKE_CASE_: Dict =image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
SCREAMING_SNAKE_CASE_: Tuple =image_embeds.repeat_interleave(lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE_: str =torch.zeros_like(lowerCAmelCase )
# For classifier-free guidance we need both unconditional and conditional
# predictions; concatenating the two sets of embeddings into a single batch
# lets one forward pass produce both.
SCREAMING_SNAKE_CASE_: Tuple =torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowerCAmelCase )
def __call__( self : Tuple , lowerCAmelCase : Any , lowerCAmelCase : Tuple = 1 , lowerCAmelCase : List[str] = 25 , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[Any] = None , lowerCAmelCase : Dict = 4.0 , lowerCAmelCase : str = 64 , lowerCAmelCase : Optional[Any] = "pil" , lowerCAmelCase : int = True , ) -> Any:
'''simple docstring'''
if isinstance(lowerCAmelCase , PIL.Image.Image ):
SCREAMING_SNAKE_CASE_: List[Any] =1
elif isinstance(lowerCAmelCase , torch.Tensor ):
SCREAMING_SNAKE_CASE_: List[Any] =image.shape[0]
elif isinstance(lowerCAmelCase , lowerCAmelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
SCREAMING_SNAKE_CASE_: Optional[int] =len(lowerCAmelCase )
else:
raise ValueError(
f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowerCAmelCase )}''' )
SCREAMING_SNAKE_CASE_: Dict =self._execution_device
SCREAMING_SNAKE_CASE_: Dict =batch_size * num_images_per_prompt
SCREAMING_SNAKE_CASE_: Optional[int] =guidance_scale > 1.0
SCREAMING_SNAKE_CASE_: Dict =self._encode_image(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# prior
self.scheduler.set_timesteps(lowerCAmelCase , device=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =self.scheduler.timesteps
SCREAMING_SNAKE_CASE_: Tuple =self.prior.config.num_embeddings
SCREAMING_SNAKE_CASE_: List[Any] =self.prior.config.embedding_dim
SCREAMING_SNAKE_CASE_: List[str] =self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , self.scheduler , )
# YiYi notes: for testing only, to match ldm we can directly create latents with the desired shape: batch_size, num_embeddings, embedding_dim
SCREAMING_SNAKE_CASE_: Dict =latents.reshape(latents.shape[0] , lowerCAmelCase , lowerCAmelCase )
for i, t in enumerate(self.progress_bar(lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE_: Optional[int] =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE_: List[Any] =self.scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =self.prior(
lowerCAmelCase , timestep=lowerCAmelCase , proj_embedding=lowerCAmelCase , ).predicted_image_embedding
# remove the variance
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance:  # a bool (guidance_scale > 1.0), so test truthiness, not `is not None`
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE_: Any =noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
SCREAMING_SNAKE_CASE_: str =self.scheduler.step(
lowerCAmelCase , timestep=lowerCAmelCase , sample=lowerCAmelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for i, latent in enumerate(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Dict =self.renderer.decode(
latent[None, :] , lowerCAmelCase , size=lowerCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.stack(lowerCAmelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
SCREAMING_SNAKE_CASE_: Optional[Any] =images.cpu().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_: Optional[Any] =[self.numpy_to_pil(lowerCAmelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowerCAmelCase )
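# Illustrative sketch: the classifier-free guidance update inside the denoising
# loop above is a linear extrapolation from the unconditional toward the
# conditional prediction, scaled by guidance_scale.
def _cfg_sketch(noise_pred_uncond, noise_pred_cond, guidance_scale):
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)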
| 707
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Any =jax.device_count()
SCREAMING_SNAKE_CASE_: Dict =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Dict =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int ="""stabilityai/stable-diffusion-2"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Optional[int] =scheduler_params
SCREAMING_SNAKE_CASE_: Tuple ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.device_count()
SCREAMING_SNAKE_CASE_: Optional[Any] =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Any =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: str =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Any =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[str] =[False] * len(_A )
SCREAMING_SNAKE_CASE_: Tuple =[-1] * len(_A )
def dfs(lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =True
SCREAMING_SNAKE_CASE_: List[str] =c
for u in graph[v]:
if not visited[u]:
dfs(_A , 1 - c )
for i in range(len(_A ) ):
if not visited[i]:
dfs(_A , 0 )
for i in range(len(_A ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
_UpperCAmelCase = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
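# Minimal extra check (added sketch, not part of the original file): an odd cycle
# is the canonical non-bipartite graph, so the same routine should report False.
_UpperCAmelCase = {0: [1, 2], 1: [0, 2], 2: [0, 1]}  # a triangle (3-cycle)
print(check_bipartite_dfs(graph))  # expected output: False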
"""simple docstring"""
def __magic_name__ ( lowercase = 200_0000 ):
SCREAMING_SNAKE_CASE_: List[Any] =[0 for i in range(n + 1 )]
SCREAMING_SNAKE_CASE_: Union[str, Any] =1
SCREAMING_SNAKE_CASE_: Optional[Any] =1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =1
SCREAMING_SNAKE_CASE_: Dict =0
for i in range(lowercase ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =checkpoints.load_tax_checkpoint(A_ )
SCREAMING_SNAKE_CASE_: Dict =flatten_dict(A_ )
return flax_params
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[str] ={}
SCREAMING_SNAKE_CASE_: List[str] ={
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
SCREAMING_SNAKE_CASE_: Optional[Any] ={
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
SCREAMING_SNAKE_CASE_: Optional[int] =""".""".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
SCREAMING_SNAKE_CASE_: List[str] =new_key.replace(A_ , A_ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
SCREAMING_SNAKE_CASE_: Optional[int] =new_key.replace(A_ , A_ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
SCREAMING_SNAKE_CASE_: Optional[Any] =re.sub(R"""layers_(\d+)""" , R"""layer.\1""" , A_ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =new_key.replace("""encoder""" , """encoder.encoder""" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
SCREAMING_SNAKE_CASE_: List[Any] =re.sub(R"""layers_(\d+)""" , R"""layer.\1""" , A_ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =flax_dict[key]
SCREAMING_SNAKE_CASE_: Union[str, Any] ={}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
SCREAMING_SNAKE_CASE_: List[Any] =torch.from_numpy(converted_dict[key].T )
else:
SCREAMING_SNAKE_CASE_: Tuple =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
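# (added note) every non-embedding tensor is transposed above because Flax stores
# dense kernels as (in_features, out_features) while torch.nn.Linear expects
# (out_features, in_features); embedding tables keep their original layout.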
def __magic_name__ ( lowercase , lowercase , lowercase=False , lowercase=False ):
SCREAMING_SNAKE_CASE_: int =get_flax_param(A_ )
if not use_large:
SCREAMING_SNAKE_CASE_: Dict =PixaStructVisionConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] =PixaStructTextConfig()
else:
SCREAMING_SNAKE_CASE_: Tuple =PixaStructVisionConfig(
hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
SCREAMING_SNAKE_CASE_: List[str] =PixaStructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
SCREAMING_SNAKE_CASE_: int =PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=A_ )
SCREAMING_SNAKE_CASE_: int =PixaStructForConditionalGeneration(A_ )
SCREAMING_SNAKE_CASE_: Optional[int] =rename_and_convert_flax_params(A_ )
model.load_state_dict(A_ )
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" )
SCREAMING_SNAKE_CASE_: Tuple =PixaStructImageProcessor()
SCREAMING_SNAKE_CASE_: List[Any] =PixaStructProcessor(image_processor=A_ , tokenizer=A_ )
if use_large:
SCREAMING_SNAKE_CASE_: Union[str, Any] =4096
SCREAMING_SNAKE_CASE_: int =True
# mkdir if needed
os.makedirs(A_ , exist_ok=A_ )
model.save_pretrained(A_ )
processor.save_pretrained(A_ )
print("""Model saved in {}""".format(A_ ) )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""")
parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""")
_UpperCAmelCase = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
)
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
"""simple docstring"""
def __magic_name__ ( lowercase = 10**12 ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =1
SCREAMING_SNAKE_CASE_: Optional[int] =0
SCREAMING_SNAKE_CASE_: int =1
SCREAMING_SNAKE_CASE_: Optional[int] =1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
def __magic_name__ ( lowercase , lowercase ):
return int((input_a, input_a).count(0 ) == 0 )
def __magic_name__ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class a ( unittest.TestCase ):
UpperCamelCase : List[Any] = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase : List[str] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def lowerCamelCase__ ( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE_: Tuple =text_generator("""This is a test""" , do_sample=__a )
self.assertEqual(
__a , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
SCREAMING_SNAKE_CASE_: List[str] =text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
__a , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
SCREAMING_SNAKE_CASE_: List[str] =text_generator("""This is a test""" , do_sample=__a , num_return_sequences=2 , return_tensors=__a )
self.assertEqual(
__a , [
{"""generated_token_ids""": ANY(__a )},
{"""generated_token_ids""": ANY(__a )},
] , )
SCREAMING_SNAKE_CASE_: Tuple =text_generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE_: Tuple ="""<pad>"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =text_generator(
["""This is a test""", """This is a second test"""] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , )
self.assertEqual(
__a , [
[
{"""generated_token_ids""": ANY(__a )},
{"""generated_token_ids""": ANY(__a )},
],
[
{"""generated_token_ids""": ANY(__a )},
{"""generated_token_ids""": ANY(__a )},
],
] , )
@require_tf
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE_: str =text_generator("""This is a test""" , do_sample=__a )
self.assertEqual(
__a , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
SCREAMING_SNAKE_CASE_: Optional[Any] =text_generator(["""This is a test""", """This is a second test"""] , do_sample=__a )
self.assertEqual(
__a , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =TextGenerationPipeline(model=__a , tokenizer=__a )
return text_generator, ["This is a test", "Another test"]
def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""Hello I believe in"""
SCREAMING_SNAKE_CASE_: str =pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
SCREAMING_SNAKE_CASE_: Tuple =text_generator(__a )
self.assertEqual(
__a , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
SCREAMING_SNAKE_CASE_: List[str] =text_generator(__a , stop_sequence=""" fe""" )
self.assertEqual(__a , [{"""generated_text""": """Hello I believe in fe"""}] )
def lowerCamelCase__ ( self : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =text_generator.model
SCREAMING_SNAKE_CASE_: Any =text_generator.tokenizer
SCREAMING_SNAKE_CASE_: List[str] =text_generator("""This is a test""" )
self.assertEqual(__a , [{"""generated_text""": ANY(__a )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
SCREAMING_SNAKE_CASE_: Tuple =text_generator("""This is a test""" , return_full_text=__a )
self.assertEqual(__a , [{"""generated_text""": ANY(__a )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
SCREAMING_SNAKE_CASE_: Optional[int] =pipeline(task="""text-generation""" , model=__a , tokenizer=__a , return_full_text=__a )
SCREAMING_SNAKE_CASE_: Optional[Any] =text_generator("""This is a test""" )
self.assertEqual(__a , [{"""generated_text""": ANY(__a )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
SCREAMING_SNAKE_CASE_: List[str] =text_generator("""This is a test""" , return_full_text=__a )
self.assertEqual(__a , [{"""generated_text""": ANY(__a )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
SCREAMING_SNAKE_CASE_: Any =text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"""generated_text""": ANY(__a )}, {"""generated_text""": ANY(__a )}],
[{"""generated_text""": ANY(__a )}, {"""generated_text""": ANY(__a )}],
] , )
if text_generator.tokenizer.pad_token is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"""generated_text""": ANY(__a )}, {"""generated_text""": ANY(__a )}],
[{"""generated_text""": ANY(__a )}, {"""generated_text""": ANY(__a )}],
] , )
with self.assertRaises(__a ):
SCREAMING_SNAKE_CASE_: Any =text_generator("""test""" , return_full_text=__a , return_text=__a )
with self.assertRaises(__a ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =text_generator("""test""" , return_full_text=__a , return_tensors=__a )
with self.assertRaises(__a ):
SCREAMING_SNAKE_CASE_: Optional[Any] =text_generator("""test""" , return_text=__a , return_tensors=__a )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
SCREAMING_SNAKE_CASE_: Dict =text_generator("""""" )
self.assertEqual(__a , [{"""generated_text""": ANY(__a )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
SCREAMING_SNAKE_CASE_: int =text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, so we skip these tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
SCREAMING_SNAKE_CASE_: Tuple =["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 500 , max_new_tokens=20 )
SCREAMING_SNAKE_CASE_: List[Any] =text_generator("""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__a ):
text_generator(
"""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCamelCase__ ( self : List[str] ) -> str:
'''simple docstring'''
import torch
# Classic `model_kwargs`
SCREAMING_SNAKE_CASE_: Tuple =pipeline(
model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
SCREAMING_SNAKE_CASE_: Optional[Any] =pipe("""This is a test""" )
self.assertEqual(
__a , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent to the model as they're unlikely to mean anything else.)
SCREAMING_SNAKE_CASE_: Optional[int] =pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
SCREAMING_SNAKE_CASE_: Optional[int] =pipe("""This is a test""" )
self.assertEqual(
__a , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
SCREAMING_SNAKE_CASE_: Dict =pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
SCREAMING_SNAKE_CASE_: Tuple =pipe("""This is a test""" )
self.assertEqual(
__a , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
import torch
SCREAMING_SNAKE_CASE_: Optional[int] =pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def lowerCamelCase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
import torch
SCREAMING_SNAKE_CASE_: Any =pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa )
pipe("""This is a test""" , do_sample=__a , top_p=0.5 )
def lowerCamelCase__ ( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str ="""Hello world"""
SCREAMING_SNAKE_CASE_: List[Any] =pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
if text_generator.model.framework == "tf":
SCREAMING_SNAKE_CASE_: List[str] =logging.get_logger("""transformers.generation.tf_utils""" )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =logging.get_logger("""transformers.generation.utils""" )
SCREAMING_SNAKE_CASE_: List[Any] ="""Both `max_new_tokens`""" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__a ) as cl:
SCREAMING_SNAKE_CASE_: Any =text_generator(__a , max_length=10 , max_new_tokens=1 )
self.assertIn(__a , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__a ) as cl:
SCREAMING_SNAKE_CASE_: Tuple =text_generator(__a , max_new_tokens=1 )
self.assertNotIn(__a , cl.out )
with CaptureLogger(__a ) as cl:
SCREAMING_SNAKE_CASE_: Optional[int] =text_generator(__a , max_length=10 )
self.assertNotIn(__a , cl.out )
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def __magic_name__ ( lowercase , lowercase , lowercase ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""input_conv.weight_g"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[str] =checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: Union[str, Any] =checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[Any] =checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""output_conv.1.weight_g"""]
SCREAMING_SNAKE_CASE_: List[str] =checkpoint["""output_conv.1.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
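# (added note) the checkpoint stores weight-normalised parameters (weight_g /
# weight_v); apply_weight_norm() creates matching parameter names on the HF model
# so they can be copied, and remove_weight_norm() then folds g and v back into
# plain convolution weights.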
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE_: List[Any] =SpeechTaHifiGanConfig.from_pretrained(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] =SpeechTaHifiGan(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch.load(lowercase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =np.load(lowercase )
SCREAMING_SNAKE_CASE_: Any =stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_: str =stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
model.save_pretrained(lowercase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
"""simple docstring"""
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
_UpperCAmelCase = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class a ( lowercase_ ):
UpperCamelCase : List[str] = 'facebook/nllb-200-distilled-600M'
UpperCamelCase : Dict = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language of the desired output. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated into `tgt_lang`.'
)
UpperCamelCase : Tuple = 'translator'
UpperCamelCase : Dict = AutoTokenizer
UpperCamelCase : Optional[Any] = AutoModelForSeqaSeqLM
UpperCamelCase : List[str] = LANGUAGE_CODES
UpperCamelCase : Dict = ['text', 'text', 'text']
UpperCamelCase : List[Any] = ['text']
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : List[Any] ) -> int:
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(f'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(f'''{tgt_lang} is not a supported language.''' )
SCREAMING_SNAKE_CASE_: Dict =self.lang_to_code[src_lang]
SCREAMING_SNAKE_CASE_: Dict =self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowerCamelCase_ , return_tensors="""pt""" , src_lang=lowerCamelCase_ , tgt_lang=lowerCamelCase_ )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.model.generate(**lowerCamelCase_ )
def lowerCamelCase__ ( self : int , lowerCAmelCase : Any ) -> Dict:
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowerCamelCase_ )
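# Usage sketch (added; the class name and call signature are assumptions based on
# the transformers tool API, not part of the original file):
#   translator = TranslationTool()
#   translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")
# Plain-English language names are resolved to NLLB codes via LANGUAGE_CODES.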
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def __magic_name__ ( lowercase ):
if "cls_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""cls_token""" , """vit.embeddings.cls_token""" )
if "mask_token" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""mask_token""" , """decoder.mask_token""" )
if "decoder_pos_embed" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" )
if "decoder_blocks" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""decoder_blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE_: str =name.replace("""blocks""" , """vit.encoder.layer""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE_: Union[str, Any] =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
SCREAMING_SNAKE_CASE_: Any =name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.weight""" , """vit.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name:
SCREAMING_SNAKE_CASE_: List[str] =name.replace("""norm.bias""" , """vit.layernorm.bias""" )
return name
def __magic_name__ ( lowercase , lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: Optional[int] =orig_state_dict.pop(lowercase )
if "qkv" in key:
SCREAMING_SNAKE_CASE_: Dict =key.split(""".""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =int(key_split[1] )
if "decoder_blocks" in key:
SCREAMING_SNAKE_CASE_: int =config.decoder_hidden_size
SCREAMING_SNAKE_CASE_: Optional[int] ="""decoder.decoder_layers."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Dict =val[:dim, :]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: str =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: List[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Tuple =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: List[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Any =config.hidden_size
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""vit.encoder.layer."""
if "weight" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim, :]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE_: Dict =val[-dim:, :]
elif "bias" in key:
SCREAMING_SNAKE_CASE_: Optional[Any] =val[:dim]
SCREAMING_SNAKE_CASE_: Any =val[dim : dim * 2]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[-dim:]
else:
SCREAMING_SNAKE_CASE_: Tuple =val
return orig_state_dict
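# (added note) the branch above splits each fused timm-style "qkv" projection of
# shape (3 * dim, dim) into separate query / key / value tensors by slicing the
# first axis into thirds, which is the layout the HF ViT attention layers expect.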
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =ViTMAEConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: List[Any] =1024
SCREAMING_SNAKE_CASE_: Dict =4096
SCREAMING_SNAKE_CASE_: Tuple =24
SCREAMING_SNAKE_CASE_: int =16
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Union[str, Any] =14
SCREAMING_SNAKE_CASE_: Any =1280
SCREAMING_SNAKE_CASE_: Dict =5120
SCREAMING_SNAKE_CASE_: Optional[int] =32
SCREAMING_SNAKE_CASE_: Optional[Any] =16
SCREAMING_SNAKE_CASE_: Tuple =ViTMAEForPreTraining(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""model"""]
SCREAMING_SNAKE_CASE_: Optional[Any] =ViTMAEImageProcessor(size=config.image_size )
SCREAMING_SNAKE_CASE_: str =convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple ="""https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
SCREAMING_SNAKE_CASE_: List[Any] =Image.open(requests.get(lowercase , stream=lowercase ).raw )
SCREAMING_SNAKE_CASE_: int =image_processor(images=lowercase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =outputs.logits
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Dict =torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
SCREAMING_SNAKE_CASE_: Tuple =torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
SCREAMING_SNAKE_CASE_: Any =torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] , lowercase , atol=1e-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
UpperCamelCase : List[Any] = IFInpaintingPipeline
UpperCamelCase : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
UpperCamelCase : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase : Any = PipelineTesterMixin.required_optional_params - {'latents'}
def lowerCamelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
return self._get_dummy_components()
def lowerCamelCase__ ( self : str , lowerCAmelCase : Dict , lowerCAmelCase : List[Any]=0 ) -> int:
'''simple docstring'''
if str(lowerCAmelCase ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE_: Tuple =torch.manual_seed(lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_: Dict =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any ={
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCamelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
self._test_save_load_local()
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
"""simple docstring"""
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =False
while is_sorted is False: # keep looping until a full pass makes no swaps
SCREAMING_SNAKE_CASE_: Tuple =True
for i in range(0 , len(lowercase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_: Tuple =False
for i in range(1 , len(lowercase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE_: str =False
return input_list
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
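# Usage sketch (added, not in the original file): each while-iteration performs one
# even-index sweep and one odd-index sweep (the "brick sort" pattern), e.g.
# >>> odd_even_sort([5, 3, 1, 2])
# [1, 2, 3, 5]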
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
_UpperCAmelCase = 6_3_7_8_1_3_7.0
_UpperCAmelCase = 6_3_5_6_7_5_2.3_1_4_2_4_5
_UpperCAmelCase = 6_3_7_8_1_3_7
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str =(AXIS_A - AXIS_B) / AXIS_A
SCREAMING_SNAKE_CASE_: Optional[int] =atan((1 - flattening) * tan(radians(a__ ) ) )
SCREAMING_SNAKE_CASE_: Tuple =atan((1 - flattening) * tan(radians(a__ ) ) )
SCREAMING_SNAKE_CASE_: List[Any] =radians(a__ )
SCREAMING_SNAKE_CASE_: Any =radians(a__ )
# Equation
SCREAMING_SNAKE_CASE_: Tuple =sin((phi_a - phi_a) / 2 )
SCREAMING_SNAKE_CASE_: List[str] =sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
SCREAMING_SNAKE_CASE_: str =sqrt(sin_sq_phi + (cos(a__ ) * cos(a__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
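# Usage sketch (added; coordinates are illustrative): the four arguments are two
# latitude/longitude pairs in degrees and the result is the flattening-corrected
# great-circle distance in metres, e.g. San Francisco (37.77, -122.42) to
# Yosemite (37.86, -119.54) comes out on the order of 2.5e5 metres.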
"""simple docstring"""
def __magic_name__ ( lowercase ):
return str(lowercase ) == str(lowercase )[::-1]
def __magic_name__ ( lowercase ):
return int(lowercase ) + int(str(lowercase )[::-1] )
def __magic_name__ ( lowercase = 1_0000 ):
SCREAMING_SNAKE_CASE_: List[str] =[]
for num in range(1 , lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =0
SCREAMING_SNAKE_CASE_: int =num
while iterations < 50:
SCREAMING_SNAKE_CASE_: Optional[Any] =sum_reverse(lowercase )
iterations += 1
if is_palindrome(lowercase ):
break
else:
lychrel_nums.append(lowercase )
return len(lowercase )
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
_UpperCAmelCase = 1_6
_UpperCAmelCase = 3_2
def __magic_name__ ( lowercase , lowercase = 16 , lowercase = "bert-base-cased" ):
SCREAMING_SNAKE_CASE_: int =AutoTokenizer.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_: Optional[Any] =load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_: Any =datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=snake_case__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE_: str =tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(snake_case__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Optional[Any] =DataLoader(
tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
SCREAMING_SNAKE_CASE_: int =DataLoader(
tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
def __magic_name__ ( lowercase , lowercase ):
# Initialize accelerator
SCREAMING_SNAKE_CASE_: Any =Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_: Any =config["""lr"""]
SCREAMING_SNAKE_CASE_: Dict =int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE_: Union[str, Any] =int(config["""seed"""] )
SCREAMING_SNAKE_CASE_: Union[str, Any] =int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE_: List[Any] =args.model_name_or_path
set_seed(snake_case__ )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =get_dataloaders(snake_case__ , snake_case__ , snake_case__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_: Any =AutoModelForSequenceClassification.from_pretrained(snake_case__ , return_dict=snake_case__ )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_: str =(
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
SCREAMING_SNAKE_CASE_: Dict =optimizer_cls(params=model.parameters() , lr=snake_case__ )
if accelerator.state.deepspeed_plugin is not None:
SCREAMING_SNAKE_CASE_: Dict =accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =1
SCREAMING_SNAKE_CASE_: Any =(len(snake_case__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
SCREAMING_SNAKE_CASE_: str =get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=0 , num_training_steps=snake_case__ , )
else:
SCREAMING_SNAKE_CASE_: Dict =DummyScheduler(snake_case__ , total_num_steps=snake_case__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# We need to keep track of how many total steps we have iterated over
SCREAMING_SNAKE_CASE_: int =0
    # We also need to keep track of the starting epoch so files are named properly
SCREAMING_SNAKE_CASE_: Optional[int] =0
# Now we train the model
SCREAMING_SNAKE_CASE_: Dict =evaluate.load("""glue""" , """mrpc""" )
SCREAMING_SNAKE_CASE_: Any =0
SCREAMING_SNAKE_CASE_: Optional[int] ={}
for epoch in range(snake_case__ , snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE_: Tuple =model(**snake_case__ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =outputs.loss
SCREAMING_SNAKE_CASE_: Optional[int] =loss / gradient_accumulation_steps
accelerator.backward(snake_case__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
SCREAMING_SNAKE_CASE_: List[str] =0
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Any =model(**snake_case__ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case__ ) - 1:
SCREAMING_SNAKE_CASE_: Tuple =predictions[: len(eval_dataloader.dataset ) - samples_seen]
SCREAMING_SNAKE_CASE_: List[Any] =references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
SCREAMING_SNAKE_CASE_: Dict =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , snake_case__ )
SCREAMING_SNAKE_CASE_: Any =eval_metric["""accuracy"""]
if best_performance < eval_metric["accuracy"]:
SCREAMING_SNAKE_CASE_: List[Any] =eval_metric["""accuracy"""]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
json.dump(snake_case__ , snake_case__ )
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Any =argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=snake_case__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=snake_case__ , )
parser.add_argument(
"""--output_dir""" , type=snake_case__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--performance_lower_bound""" , type=snake_case__ , default=snake_case__ , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , )
parser.add_argument(
"""--num_epochs""" , type=snake_case__ , default=3 , help="""Number of train epochs.""" , )
SCREAMING_SNAKE_CASE_: Any =parser.parse_args()
SCREAMING_SNAKE_CASE_: List[str] ={"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
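# Illustrative invocation (flag names from the `accelerate` CLI; the DeepSpeed
# configuration itself is assumed to come from `accelerate config`):
#   accelerate launch --use_deepspeed this_script.py --model_name_or_path bert-base-cased --num_epochs 3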
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_dpt"""] = ["""DPTFeatureExtractor"""]
    _import_structure["""image_processing_dpt"""] = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_dpt"""] = [
        """DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """DPTForDepthEstimation""",
        """DPTForSemanticSegmentation""",
        """DPTModel""",
        """DPTPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
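# Note (added for clarity): with this lazy-module pattern, importing the package is
# cheap; heavy submodules such as modeling_dpt are only imported the first time one
# of their attributes (e.g. DPTModel) is looked up on the module.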
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =[[1, 2, 4], [1, 2, 3, 4]]
SCREAMING_SNAKE_CASE_: Any =DisjunctiveConstraint(UpperCAmelCase__ )
self.assertTrue(isinstance(dc.token_ids , UpperCAmelCase__ ) )
with self.assertRaises(UpperCAmelCase__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(UpperCAmelCase__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =[[1, 2], [1, 2, 3, 4]]
with self.assertRaises(UpperCAmelCase__ ):
DisjunctiveConstraint(UpperCAmelCase__ ) # fails here
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =[[1, 2, 3], [1, 2, 4]]
SCREAMING_SNAKE_CASE_: Tuple =DisjunctiveConstraint(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =dc.update(1 )
SCREAMING_SNAKE_CASE_: Optional[Any] =stepped is True and completed is False and reset is False
self.assertTrue(UpperCAmelCase__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
SCREAMING_SNAKE_CASE_: Dict =dc.update(2 )
SCREAMING_SNAKE_CASE_: Tuple =stepped is True and completed is False and reset is False
self.assertTrue(UpperCAmelCase__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
SCREAMING_SNAKE_CASE_: List[Any] =dc.update(3 )
SCREAMING_SNAKE_CASE_: List[str] =stepped is True and completed is True and reset is False
self.assertTrue(UpperCAmelCase__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def lowerCamelCase__ ( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =[[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
SCREAMING_SNAKE_CASE_: Dict =DisjunctiveConstraint(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
SCREAMING_SNAKE_CASE_: Any =dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
SCREAMING_SNAKE_CASE_: Optional[Any] =dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
SCREAMING_SNAKE_CASE_: Any =dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
SCREAMING_SNAKE_CASE_: Any =dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
SCREAMING_SNAKE_CASE_: List[str] =dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
SCREAMING_SNAKE_CASE_: Tuple =dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
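# Sketch (not part of this test file): in generation code a DisjunctiveConstraint is
# typically passed to `model.generate` through its `constraints` argument so that beam
# search must emit one of the alternative token-id sequences. Names are illustrative.
#
#   word_ids = tokenizer(["rain", "raining"], add_special_tokens=False).input_ids
#   constraint = DisjunctiveConstraint(word_ids)
#   output = model.generate(**inputs, constraints=[constraint], num_beams=4)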
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class a :
def __init__( self : str ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: list[Any] =[]
SCREAMING_SNAKE_CASE_: int =0
SCREAMING_SNAKE_CASE_: int =0
def lowerCamelCase__ ( self : Optional[Any] ) -> bool:
'''simple docstring'''
return self.head == self.tail
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
self.data.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =self.tail + 1
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.data[self.head]
SCREAMING_SNAKE_CASE_: Optional[int] =self.head + 1
return ret
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.tail - self.head
def lowerCamelCase__ ( self : str ) -> None:
'''simple docstring'''
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =data
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: int =1
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return self.data
def lowerCamelCase__ ( self : List[Any] ) -> MyNode | None:
'''simple docstring'''
return self.left
def lowerCamelCase__ ( self : Dict ) -> MyNode | None:
'''simple docstring'''
return self.right
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
return self.height
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =data
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =node
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =node
def lowerCamelCase__ ( self : int , lowerCAmelCase : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =height
def __magic_name__ ( lowercase ):
if node is None:
return 0
return node.get_height()
def __magic_name__ ( lowercase , lowercase ):
if a > b:
return a
return b
def __magic_name__ ( lowercase ):
print("""left rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: int =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
print("""right rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowercase ) )
return right_rotation(lowercase )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowercase ) )
return left_rotation(lowercase )
def __magic_name__ ( lowercase , lowercase ):
if node is None:
return MyNode(lowercase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowercase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
SCREAMING_SNAKE_CASE_: Union[str, Any] =node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
SCREAMING_SNAKE_CASE_: Any =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: List[Any] =lr_rotation(lowercase )
else:
node.set_right(insert_node(node.get_right() , lowercase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
SCREAMING_SNAKE_CASE_: Tuple =node.get_right()
assert right_child is not None
if data < right_child.get_data():
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =left_rotation(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
return node
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: Dict =root.get_right()
if right_child is None:
break
SCREAMING_SNAKE_CASE_: str =right_child
return root.get_data()
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: str =root.get_left()
if left_child is None:
break
SCREAMING_SNAKE_CASE_: Dict =left_child
return root.get_data()
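# Note (added for clarity): when the node to delete has two children, del_node below
# appears to copy in the left-most value of the right subtree (the in-order successor,
# found with get_left_most) and then delete that value from the right subtree.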
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str =root.get_left()
SCREAMING_SNAKE_CASE_: List[Any] =root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_left_most(lowercase )
root.set_data(lowercase )
root.set_right(del_node(lowercase , lowercase ) )
elif left_child is not None:
SCREAMING_SNAKE_CASE_: Optional[int] =left_child
elif right_child is not None:
SCREAMING_SNAKE_CASE_: Any =right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(lowercase , lowercase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowercase , lowercase ) )
if get_height(lowercase ) - get_height(lowercase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
SCREAMING_SNAKE_CASE_: Tuple =left_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
elif get_height(lowercase ) - get_height(lowercase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
SCREAMING_SNAKE_CASE_: Optional[Any] =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: str =lr_rotation(lowercase )
SCREAMING_SNAKE_CASE_: str =my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowercase )
return root
class a :
def __init__( self : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: MyNode | None =None
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
return get_height(self.root )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""insert:""" + str(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Tuple =insert_node(self.root , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""delete:""" + str(lowerCAmelCase ) )
if self.root is None:
print("""Tree is empty!""" )
return
SCREAMING_SNAKE_CASE_: Union[str, Any] =del_node(self.root , lowerCAmelCase )
    def __str__( self : List[str] , ) -> str: # a level-order traversal gives a more intuitive look at the tree
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =""""""
SCREAMING_SNAKE_CASE_: str =MyQueue()
q.push(self.root )
SCREAMING_SNAKE_CASE_: List[str] =self.get_height()
if layer == 0:
return output
SCREAMING_SNAKE_CASE_: int =0
while not q.is_empty():
SCREAMING_SNAKE_CASE_: int =q.pop()
SCREAMING_SNAKE_CASE_: List[Any] =""" """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(lowerCAmelCase )
q.push(lowerCAmelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
SCREAMING_SNAKE_CASE_: List[Any] =cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , lowerCAmelCase ) - 1:
SCREAMING_SNAKE_CASE_: int =layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __magic_name__ ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
_UpperCAmelCase = AVLtree()
_UpperCAmelCase = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class a ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str ) -> str:
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case__ , scheduler=snake_case__ )
def __call__( self : Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
SCREAMING_SNAKE_CASE_: List[str] =1
SCREAMING_SNAKE_CASE_: int =self.unet(snake_case__ , snake_case__ ).sample
SCREAMING_SNAKE_CASE_: Optional[Any] =self.scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
SCREAMING_SNAKE_CASE_: List[Any] =scheduler_output - scheduler_output + torch.ones_like(snake_case__ )
return result
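# Illustrative usage (identifiers assumed; the class name is obfuscated above): this
# one-step pipeline mirrors the minimal custom pipelines used in diffusers tests.
#
#   pipe = OneStepPipeline(unet=unet, scheduler=scheduler)
#   ones = pipe()  # scheduler_output - scheduler_output + ones_like(...) == all ones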
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    output = """"""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence )
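# Expected behaviour (illustrative): Atbash maps A<->Z, B<->Y, ... and leaves
# non-letters untouched, so applying the cipher twice returns the original text.
# >>> atbash("ABCDEFGH")
# 'ZYXWVUTS'
# >>> atbash(atbash("with space"))
# 'with space'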
def benchmark() -> None:
    from timeit import timeit
    print("""Running performance benchmarks...""")
    setup = """from string import printable ; from __main__ import atbash, atbash_slow"""
    print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=setup )} seconds''')
    print(f'''> atbash(): {timeit("atbash(printable)" , setup=setup )} seconds''')
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a ( __lowerCamelCase ):
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_: Optional[int] =pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_: Optional[int] =pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def lowerCamelCase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE_: int =pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def lowerCamelCase__ ( self : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def lowerCamelCase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def lowerCamelCase__ ( self : Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def lowerCamelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
SCREAMING_SNAKE_CASE_: int =pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def lowerCamelCase__ ( self : str ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
import PIL.Image
SCREAMING_SNAKE_CASE_: Optional[int] =PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=SCREAMING_SNAKE_CASE_ ) as mock_cast_to_python_objects:
SCREAMING_SNAKE_CASE_: Tuple =pa.array(TypedSequence([{"""path""": None, """bytes""": B"""image_bytes"""}, pil_image] , type=Image() ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , SCREAMING_SNAKE_CASE_ )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str =pa.BufferReader(__a ) if isinstance(__a , pa.Buffer ) else pa.memory_map(__a )
SCREAMING_SNAKE_CASE_: Optional[Any] =pa.ipc.open_stream(__a )
SCREAMING_SNAKE_CASE_: List[str] =f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_: int =pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE_: Any ={"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_: Union[str, Any] =Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=__a , features=__a ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
SCREAMING_SNAKE_CASE_: Tuple =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE_: int =pa.ipc.open_stream(__a )
SCREAMING_SNAKE_CASE_: List[Any] =f.read_all()
SCREAMING_SNAKE_CASE_: Dict =pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(__a )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[Any] =pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer:
with pytest.raises(__a ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer:
with pytest.raises(__a ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[str] =pa.BufferOutputStream()
with ArrowWriter(
stream=__a , writer_batch_size=__a , hash_salt="""split_name""" , check_duplicates=__a , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_: str =pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE_: str ={"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_: Union[str, Any] =pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE_: Optional[int] ={"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: int =pa.BufferOutputStream()
SCREAMING_SNAKE_CASE_: Optional[int] =pa.schema(__a ) if fields else None
with ArrowWriter(stream=__a , schema=__a , writer_batch_size=__a ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
SCREAMING_SNAKE_CASE_: List[str] ={"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __magic_name__ ( ):
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_: Any ={"""col_1""": pa.string(), """col_2""": pa.intaa()}
SCREAMING_SNAKE_CASE_: Dict =os.path.join(__a , """test.arrow""" )
with ArrowWriter(path=__a , schema=pa.schema(__a ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(__a , metadata=writer._schema.metadata )
_check_output(__a , 1 )
def __magic_name__ ( lowercase ):
if pa.types.is_list(__a ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __magic_name__ ( lowercase , lowercase ):
if isinstance(lst[0] , __a ):
change_first_primitive_element_in_list(lst[0] , __a )
else:
SCREAMING_SNAKE_CASE_: List[str] =value
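# Note (added for clarity): the helper above rewrites the first primitive element of an
# arbitrarily nested list in place; the test below uses it to push one value outside the
# optimized integer range and check that the sequence falls back to a wider integer type.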
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =pa.array(TypedSequence(__a , optimized_int_type=__a ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __magic_name__ ( lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str =pa.array(OptimizedTypedSequence(__a , col=__a ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
SCREAMING_SNAKE_CASE_: Optional[int] =copy.deepcopy(__a )
SCREAMING_SNAKE_CASE_: List[str] =np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(__a , __a )
SCREAMING_SNAKE_CASE_: int =pa.array(OptimizedTypedSequence(__a , col=__a ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Dict =str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=__a ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: List[str] ="""mock://dataset-train.arrow"""
with ArrowWriter(path=__a , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(__a ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(__a )
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =pa.BufferOutputStream()
with ParquetWriter(stream=__a ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =writer.finalize()
assert num_examples == 2
assert num_bytes > 0
SCREAMING_SNAKE_CASE_: Optional[Any] =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE_: Dict =pq.read_table(__a )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def __magic_name__ ( lowercase , lowercase ):
import PIL.Image
SCREAMING_SNAKE_CASE_: Optional[Any] =str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(__a , format="""png""" )
SCREAMING_SNAKE_CASE_: Any =pa.BufferOutputStream()
with ParquetWriter(
stream=__a , features=Features({"""image""": Image()} ) , embed_local_files=__a ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
SCREAMING_SNAKE_CASE_: Optional[int] =pa.BufferReader(output.getvalue() )
SCREAMING_SNAKE_CASE_: int =pq.read_table(__a )
SCREAMING_SNAKE_CASE_: List[str] =pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , __a )
with open(__a , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: Tuple =pa.schema([pa.field("""col_1""" , pa.string() , nullable=__a )] )
SCREAMING_SNAKE_CASE_: Union[str, Any] =pa.BufferOutputStream()
with ArrowWriter(stream=__a ) as writer:
writer._build_writer(inferred_schema=__a )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=3 , lowerCAmelCase : Optional[Any]=64 , lowerCAmelCase : Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =np.random.default_rng(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =length
SCREAMING_SNAKE_CASE_: Union[str, Any] =rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_: Tuple =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ) -> str:
'''simple docstring'''
return self.length
def __getitem__( self : Union[str, Any] , lowerCAmelCase : Any ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : str=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Optional[int]=False ) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: Dict =True
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tuple=None ) -> int:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
def __init__( self : Union[str, Any] , lowerCAmelCase : Any=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : List[Any]=False ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[str] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: List[Any] =True
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : int=None ) -> Any:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Optional[int] =False
return x * self.a + self.b
def __magic_name__ ( lowercase , lowercase = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE_: Optional[int] ={"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
SCREAMING_SNAKE_CASE_: Any =load_dataset("""csv""" , data_files=lowercase )
SCREAMING_SNAKE_CASE_: Any =datasets["""train"""].unique("""label""" )
SCREAMING_SNAKE_CASE_: List[Any] ={v: i for i, v in enumerate(lowercase )}
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Dict =tokenizer(
examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase , padding="""max_length""" )
if "label" in examples:
SCREAMING_SNAKE_CASE_: Optional[int] =[label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_: List[Any] =datasets.map(
lowercase , batched=lowercase , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
return tokenizer.pad(lowercase , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(tokenized_datasets["""train"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=2 )
SCREAMING_SNAKE_CASE_: Dict =DataLoader(tokenized_datasets["""validation"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=1 )
return train_dataloader, eval_dataloader
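# Usage sketch (identifiers assumed; in accelerate's test utilities these classes are
# known as RegressionDataset/RegressionModel): the tiny loaders above (train batch
# size 2, eval batch size 1) are meant to be run through Accelerator.prepare, e.g.
#
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator)
#   model, train_dl, eval_dl = accelerator.prepare(RegressionModel(), train_dl, eval_dl)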
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_UpperCAmelCase = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_UpperCAmelCase = get_tests_dir("""fixtures/vocab.json""")
_UpperCAmelCase = get_tests_dir("""fixtures""")
class a ( unittest.TestCase ):
UpperCamelCase : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =0
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCamelCase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_: Union[str, Any] =WavaVecaConfig()
SCREAMING_SNAKE_CASE_: Optional[int] =AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(snake_case_ )
processor.save_pretrained(snake_case_ )
SCREAMING_SNAKE_CASE_: List[Any] =AutoProcessor.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCamelCase__ ( self : Dict ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(snake_case_ , os.path.join(snake_case_ , snake_case_ ) )
copyfile(snake_case_ , os.path.join(snake_case_ , """vocab.json""" ) )
SCREAMING_SNAKE_CASE_: Dict =AutoProcessor.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_: List[Any] =WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE_: Optional[int] =AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =WavaVecaProcessor(snake_case_ , snake_case_ )
# save in new folder
processor.save_pretrained(snake_case_ )
# drop `processor_class` in tokenizer
with open(os.path.join(snake_case_ , snake_case_ ) , """r""" ) as f:
SCREAMING_SNAKE_CASE_: Any =json.load(snake_case_ )
config_dict.pop("""processor_class""" )
with open(os.path.join(snake_case_ , snake_case_ ) , """w""" ) as f:
f.write(json.dumps(snake_case_ ) )
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoProcessor.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCamelCase__ ( self : List[str] ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_: Optional[int] =WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE_: Tuple =AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
SCREAMING_SNAKE_CASE_: str =WavaVecaProcessor(snake_case_ , snake_case_ )
# save in new folder
processor.save_pretrained(snake_case_ )
# drop `processor_class` in feature extractor
with open(os.path.join(snake_case_ , snake_case_ ) , """r""" ) as f:
SCREAMING_SNAKE_CASE_: str =json.load(snake_case_ )
config_dict.pop("""processor_class""" )
with open(os.path.join(snake_case_ , snake_case_ ) , """w""" ) as f:
f.write(json.dumps(snake_case_ ) )
SCREAMING_SNAKE_CASE_: Tuple =AutoProcessor.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_: int =WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(snake_case_ )
# copy relevant files
copyfile(snake_case_ , os.path.join(snake_case_ , """vocab.json""" ) )
            # create empty sample processor
with open(os.path.join(snake_case_ , snake_case_ ) , """w""" ) as f:
f.write("""{}""" )
SCREAMING_SNAKE_CASE_: Tuple =AutoProcessor.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaises(snake_case_ ):
SCREAMING_SNAKE_CASE_: List[str] =AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case_ ):
SCREAMING_SNAKE_CASE_: Dict =AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case_ )
SCREAMING_SNAKE_CASE_: int =AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case_ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE_: Dict =AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case_ , use_fast=snake_case_ )
SCREAMING_SNAKE_CASE_: Union[str, Any] =new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCamelCase__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
try:
AutoConfig.register("""custom""" , snake_case_ )
AutoFeatureExtractor.register(snake_case_ , snake_case_ )
AutoTokenizer.register(snake_case_ , slow_tokenizer_class=snake_case_ )
AutoProcessor.register(snake_case_ , snake_case_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case_ ):
AutoProcessor.register(snake_case_ , snake_case_ )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE_: int =CustomFeatureExtractor.from_pretrained(snake_case_ )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_: Optional[Any] =os.path.join(snake_case_ , """vocab.txt""" )
with open(snake_case_ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE_: Optional[Any] =CustomTokenizer(snake_case_ )
SCREAMING_SNAKE_CASE_: List[Any] =CustomProcessor(snake_case_ , snake_case_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(snake_case_ )
SCREAMING_SNAKE_CASE_: str =AutoProcessor.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
class a ( _snake_case ):
UpperCamelCase : Dict = False
class a ( _snake_case ):
UpperCamelCase : Tuple = False
class a ( _snake_case ):
UpperCamelCase : Dict = """AutoFeatureExtractor"""
UpperCamelCase : Any = """AutoTokenizer"""
UpperCamelCase : Union[str, Any] = False
try:
AutoConfig.register("""custom""" , snake_case_ )
AutoFeatureExtractor.register(snake_case_ , snake_case_ )
AutoTokenizer.register(snake_case_ , slow_tokenizer_class=snake_case_ )
AutoProcessor.register(snake_case_ , snake_case_ )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE_: str =AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE_: str =AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case_ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE_: Union[str, Any] =AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=snake_case_ )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def lowerCamelCase__ ( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class a ( unittest.TestCase ):
UpperCamelCase : int = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def lowerCamelCase__ ( cls : Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =TOKEN
HfFolder.save_token(snake_case_ )
@classmethod
def lowerCamelCase__ ( cls : str ) -> List[Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def lowerCamelCase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =WavaVecaProcessor.from_pretrained(snake_case_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case_ , """test-processor""" ) , push_to_hub=snake_case_ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_: Optional[Any] =WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case_ , getattr(new_processor.feature_extractor , snake_case_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCamelCase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =WavaVecaProcessor.from_pretrained(snake_case_ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(snake_case_ , """test-processor-org""" ) , push_to_hub=snake_case_ , use_auth_token=self._token , organization="""valid_org""" , )
SCREAMING_SNAKE_CASE_: Dict =WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(snake_case_ , getattr(new_processor.feature_extractor , snake_case_ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCamelCase__ ( self : Any ) -> Dict:
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE_: Tuple =CustomFeatureExtractor.from_pretrained(snake_case_ )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_: Optional[Any] =os.path.join(snake_case_ , """vocab.txt""" )
with open(snake_case_ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE_: Optional[Any] =CustomTokenizer(snake_case_ )
SCREAMING_SNAKE_CASE_: Optional[int] =CustomProcessor(snake_case_ , snake_case_ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
SCREAMING_SNAKE_CASE_: Union[str, Any] =Repository(snake_case_ , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(snake_case_ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(snake_case_ , """tokenizer_config.json""" ) ) as f:
SCREAMING_SNAKE_CASE_: str =json.load(snake_case_ )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(snake_case_ , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case_ , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case_ , """custom_processing.py""" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE_: Tuple =AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=snake_case_ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
"""simple docstring"""
def catalan_numbers ( upper_limit ):
    if upper_limit < 0:
        raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
        N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
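# Illustrative check (editor's sketch, not part of the original file): the
# first six Catalan numbers are 1, 1, 2, 5, 14, 42.
def _catalan_sanity_check():
    assert catalan_numbers(5) == [1, 1, 2, 5, 14, 42]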
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=UpperCAmelCase__ , )
assert hasattr(self , """env""" )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =f'''{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'''
# distributed data settings
SCREAMING_SNAKE_CASE_: str ={'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCAmelCase__ , instance_count=UpperCAmelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCAmelCase__ , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCAmelCase__ , py_version="""py36""" , )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Dict ) -> List[Any]:
'''simple docstring'''
TrainingJobAnalytics(UpperCAmelCase__ ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(2,)] )
def lowerCamelCase__ ( self : str , lowerCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.create_estimator(UpperCAmelCase__ )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE_: Any =TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE_: Tuple =list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
SCREAMING_SNAKE_CASE_: Optional[int] =list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE_: Dict =(
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , UpperCAmelCase__ )
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_UpperCAmelCase = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'albert'
def __init__( self : Dict , lowerCAmelCase : List[str]=3_0000 , lowerCAmelCase : List[Any]=128 , lowerCAmelCase : List[str]=4096 , lowerCAmelCase : str=12 , lowerCAmelCase : str=1 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Dict=1_6384 , lowerCAmelCase : int=1 , lowerCAmelCase : str="gelu_new" , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : List[Any]="absolute" , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[int]=3 , **lowerCAmelCase : int , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =embedding_size
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_hidden_groups
SCREAMING_SNAKE_CASE_: List[Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: List[Any] =inner_group_num
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_act
SCREAMING_SNAKE_CASE_: int =intermediate_size
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int =max_position_embeddings
SCREAMING_SNAKE_CASE_: Any =type_vocab_size
SCREAMING_SNAKE_CASE_: int =initializer_range
SCREAMING_SNAKE_CASE_: List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE_: Dict =classifier_dropout_prob
SCREAMING_SNAKE_CASE_: int =position_embedding_type
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
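# Editor's sketch (assumption: exporting manually with torch.onnx rather than
# the transformers ONNX CLI): the dynamic-axis mapping returned above is the
# same structure torch.onnx.export expects in its `dynamic_axes` argument:
#
#   torch.onnx.export(
#       model,
#       (input_ids, attention_mask, token_type_ids),
#       "albert.onnx",
#       input_names=["input_ids", "attention_mask", "token_type_ids"],
#       dynamic_axes={
#           name: {0: "batch", 1: "sequence"}
#           for name in ["input_ids", "attention_mask", "token_type_ids"]
#       },
#   )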
"""simple docstring"""
def fibonacci ( n ):
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index ( n ):
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution ( n = 1000 ):
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
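# Quick sanity checks (editor's sketch): F(12) = 144 is the first Fibonacci
# number with three digits, so fibonacci_digits_index(3) should be 12.
def _fibonacci_sanity_check():
    assert fibonacci(12) == 144
    assert fibonacci_digits_index(3) == 12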
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a ( yaml.SafeLoader ):
def lowerCamelCase__ ( self : int , lowerCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =[self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_: Any =[tuple(lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else key for key in keys]
SCREAMING_SNAKE_CASE_: Dict =Counter(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =[key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=False ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =super().construct_mapping(lowerCAmelCase , deep=lowerCAmelCase )
self._check_no_duplicates_on_constructed_node(lowerCAmelCase )
return mapping
def _split_yaml_from_readme ( readme_content ):
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("""---""" ) + 1
        yamlblock = """\n""".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
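# Illustrative example (editor's sketch): a README whose YAML block is fenced
# by "---" lines splits into the block and the remaining body.
def _split_yaml_demo():
    yaml_block, body = _split_yaml_from_readme("---\nlicense: mit\n---\n# Title")
    assert yaml_block == "license: mit"
    assert body == "# Title"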
class a ( UpperCAmelCase__ ):
# class attributes
UpperCamelCase : Tuple = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def lowerCamelCase__ ( cls : List[Any] , lowerCAmelCase : Path ) -> "DatasetMetadata":
'''simple docstring'''
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =_split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(lowerCAmelCase )
else:
return cls()
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Path ) -> List[str]:
'''simple docstring'''
if path.exists():
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_: str =readme_file.read()
else:
SCREAMING_SNAKE_CASE_: str =None
SCREAMING_SNAKE_CASE_: Tuple =self._to_readme(lowerCAmelCase )
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as readme_file:
readme_file.write(lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Optional[str] = None ) -> str:
'''simple docstring'''
if readme_content is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =_split_yaml_from_readme(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] ="""---\n""" + self.to_yaml_string() + """---\n""" + content
else:
SCREAMING_SNAKE_CASE_: List[Any] ="""---\n""" + self.to_yaml_string() + """---\n"""
return full_content
@classmethod
def lowerCamelCase__ ( cls : Optional[int] , lowerCAmelCase : str ) -> "DatasetMetadata":
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =yaml.load(lowerCAmelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_: List[Any] ={
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**lowerCAmelCase )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=lowerCAmelCase , allow_unicode=lowerCAmelCase , encoding="""utf-8""" , ).decode("""utf-8""" )
_UpperCAmelCase = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
    ap.add_argument("""readme_filepath""")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders ( accelerator , dataset , train_idxs , valid_idxs , batch_size = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = DatasetDict(
        {
            """train""": dataset["""train"""].select(train_idxs ),
            """validation""": dataset["""train"""].select(valid_idxs ),
            """test""": dataset["""validation"""],
        } )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    test_dataloader = DataLoader(
        tokenized_datasets["""test"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function ( config , args ):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("""glue""" , """mrpc""" )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
        train_dataloader , eval_dataloader , test_dataloader = get_fold_dataloaders(
            accelerator , datasets , train_idxs , valid_idxs , )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions , references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
                metric.add_batch(
                    predictions=predictions , references=references , )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'''epoch {epoch}:''' , eval_metric )
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits
            predictions , references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions , dim=0 ) )
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references , dim=0 )
    preds = torch.stack(test_predictions , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    test_metric = metric.compute(predictions=preds , references=test_references )
    accelerator.print("""Average test metrics from all folds:""" , test_metric )
def main ():
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    # New Code #
    parser.add_argument("""--num_folds""" , type=int , default=3 , help="""The number of splits to perform across the dataset""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
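# Editor's sketch of the fold-ensembling step above: summing or averaging the
# per-fold logits before the argmax is soft voting across the K fold models.
def _soft_vote(fold_logits):
    # fold_logits: list of [num_examples, num_classes] tensors, one per fold
    return torch.stack(fold_logits, dim=0).mean(dim=0).argmax(dim=-1)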
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling ( data ):
    return (data["data"], data["target"])
def xgboost ( features , target ):
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main ():
    iris = load_iris()
    features , targets = data_handling(iris )
    x_train , x_test , y_train , y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris["""target_names"""]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="""Blues""" , normalize="""true""" , )
    plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
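# Illustrative usage sketch (editor's addition): fit on the full iris data and
# predict the first sample, which belongs to class 0 (setosa).
def _predict_demo():
    features, targets = data_handling(load_iris())
    classifier = xgboost(features, targets)
    return classifier.predict(features[:1])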
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class a :
def __init__( self : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any]=3 , lowerCAmelCase : str=32 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Optional[int]=10 , lowerCAmelCase : Any=[8, 16, 32, 64] , lowerCAmelCase : str=[1, 1, 2, 1] , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : str="relu" , lowerCAmelCase : Any=3 , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[Any]=["stage2", "stage3", "stage4"] , lowerCAmelCase : int=[2, 3, 4] , lowerCAmelCase : int=1 , ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =parent
SCREAMING_SNAKE_CASE_: int =batch_size
SCREAMING_SNAKE_CASE_: str =image_size
SCREAMING_SNAKE_CASE_: Dict =num_channels
SCREAMING_SNAKE_CASE_: str =embeddings_size
SCREAMING_SNAKE_CASE_: int =hidden_sizes
SCREAMING_SNAKE_CASE_: str =depths
SCREAMING_SNAKE_CASE_: List[str] =is_training
SCREAMING_SNAKE_CASE_: Tuple =use_labels
SCREAMING_SNAKE_CASE_: Any =hidden_act
SCREAMING_SNAKE_CASE_: List[str] =num_labels
SCREAMING_SNAKE_CASE_: Any =scope
SCREAMING_SNAKE_CASE_: List[str] =len(_lowercase )
SCREAMING_SNAKE_CASE_: List[str] =out_features
SCREAMING_SNAKE_CASE_: Dict =out_indices
SCREAMING_SNAKE_CASE_: int =num_groups
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: Dict =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: Optional[int] =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_: Any =self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowerCamelCase__ ( self : str , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =BitModel(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(_lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.num_labels
SCREAMING_SNAKE_CASE_: Optional[int] =BitForImageClassification(_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: List[str] =model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =BitBackbone(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Optional[Any] =model(_lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
SCREAMING_SNAKE_CASE_: Optional[int] =None
SCREAMING_SNAKE_CASE_: Dict =BitBackbone(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: List[str] =model(_lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =config_and_inputs
SCREAMING_SNAKE_CASE_: int ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
UpperCamelCase : List[str] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase : List[str] = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : str = False
UpperCamelCase : int = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : str = False
UpperCamelCase : List[Any] = False
def lowerCamelCase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =BitModelTester(self )
SCREAMING_SNAKE_CASE_: str =ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
return
@unittest.skip(reason="""Bit does not output attentions""" )
def lowerCamelCase__ ( self : Tuple ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def lowerCamelCase__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def lowerCamelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Optional[Any] =model_class(_lowercase )
SCREAMING_SNAKE_CASE_: Tuple =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Tuple =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Any =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowercase )
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict =model_class(config=_lowercase )
for name, module in model.named_modules():
if isinstance(_lowercase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def lowerCamelCase__ ( self : int ) -> int:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[str] =model(**self._prepare_for_class(_lowercase , _lowercase ) )
SCREAMING_SNAKE_CASE_: Any =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.model_tester.num_stages
self.assertEqual(len(_lowercase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: List[Any] =["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
SCREAMING_SNAKE_CASE_: Optional[int] =layer_type
SCREAMING_SNAKE_CASE_: Optional[int] =True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: int =True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def lowerCamelCase__ ( self : List[str] ) -> Any:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Dict ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Dict =BitModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def prepare_img ():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_lowercase )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: Optional[Any] =prepare_img()
SCREAMING_SNAKE_CASE_: Optional[Any] =image_processor(images=_lowercase , return_tensors="""pt""" ).to(_lowercase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[Any] =model(**_lowercase )
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[int] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1E-4 ) )
@require_torch
class a ( UpperCAmelCase_ , unittest.TestCase ):
UpperCamelCase : str = (BitBackbone,) if is_torch_available() else ()
UpperCamelCase : List[Any] = BitConfig
UpperCamelCase : str = False
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =BitModelTester(self )
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atomaa_masks ( protein ):
    # Build per-residue-type maps between the dense 14-atom layout and the
    # full 37-atom layout (the collapsed "aa" in the surrounding rc names
    # stands for the 14/37 atom counts).
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names )}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types] )
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names] )
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14 )
    restype_atom37_to_atom14.append([0] * 37 )
    restype_atom14_mask.append([0.0] * 14 )
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37 , dtype=torch.int32 , device=protein["""aatype"""].device , )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14 , dtype=torch.int32 , device=protein["""aatype"""].device , )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask , dtype=torch.float32 , device=protein["""aatype"""].device , )
    protein_aatype = protein["""aatype"""].to(torch.long )
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["""atom14_atom_exists"""] = residx_atom14_mask
    protein["""residx_atom14_to_atom37"""] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["""residx_atom37_to_atom14"""] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37] , dtype=torch.float32 , device=protein["""aatype"""].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        restype_name = rc.restype_atoa[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["""atom37_atom_exists"""] = residx_atom37_mask
    return protein
def make_atomaa_masks_np ( batch ):
    batch = tree_map(lambda x: torch.tensor(x , device=batch["""aatype"""].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t: np.array(t ) , make_atomaa_masks(batch ) )
    return out
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config ( swinva_name ):
    config = SwinvaConfig()
    name_split = swinva_name.split("""_""" )
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:] )
    else:
        img_size = int(name_split[3] )
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:] )
    else:
        window_size = int(name_split[2][6:] )
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 2_1841
        repo_id = """huggingface/label-files"""
        filename = """imagenet-22k-id2label.json"""
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    else:
        num_classes = 1000
        repo_id = """huggingface/label-files"""
        filename = """imagenet-1k-id2label.json"""
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key ( name ):
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
    if "layers" in name:
        name = """encoder.""" + name
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "q_bias" in name:
        name = name.replace("""q_bias""" , """query.bias""" )
    if "k_bias" in name:
        name = name.replace("""k_bias""" , """key.bias""" )
    if "v_bias" in name:
        name = name.replace("""v_bias""" , """value.bias""" )
    if "cpb_mlp" in name:
        name = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
    if name == "norm.weight":
        name = """layernorm.weight"""
    if name == "norm.bias":
        name = """layernorm.bias"""
    if "head" in name:
        name = name.replace("""head""" , """classifier""" )
    else:
        name = """swinv2.""" + name
    return name
def convert_state_dict ( orig_state_dict , model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''
                ] = val[:dim, :]
                orig_state_dict[
                    f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''
                ] = val[:dim]
                orig_state_dict[
                    f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
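# Editor's note (illustrative): the qkv branch above slices a fused projection
# of shape [3 * dim, dim] into equal query/key/value chunks:
def _split_qkv(qkv_weight, dim):
    return qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]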
def convert_swinva_checkpoint ( swinva_name , pytorch_dump_folder_path ):
    timm_model = timm.create_model(swinva_name , pretrained=True )
    timm_model.eval()
    config = get_swinva_config(swinva_name )
    model = SwinvaForImageClassification(config )
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict() , model )
    model.load_state_dict(new_state_dict )
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image_processor = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinva_name.replace("""_""" , """-""" ) ) )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors="""pt""" )
    timm_outs = timm_model(inputs["""pixel_values"""] )
    hf_outs = model(**inputs ).logits
    assert torch.allclose(timm_outs , hf_outs , atol=1e-3 )
    print(f'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path , swinva_name ) , organization="""nandwalritik""" , commit_message="""Add model""" , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["""text""", """image""", """audio"""]
def create_inputs ( input_types ):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("""Text input""" )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(f'''Invalid type requested: {input_type}''' )
    return inputs
def output_types ( outputs ):
    output_types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append("""text""" )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append("""image""" )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append("""audio""" )
        else:
            raise ValueError(f'''Invalid output: {output}''' )
    return output_types
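# Quick illustration (editor's sketch): plain strings map to the "text" type.
def _output_types_demo():
    assert output_types(["hello"]) == ["text"]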
@is_tool_test
class a :
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] =self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
SCREAMING_SNAKE_CASE_: Any =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: List[Any] =self.tool(*lowerCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
SCREAMING_SNAKE_CASE_: str =[outputs]
self.assertListEqual(output_types(lowerCAmelCase ) , self.tool.outputs )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Tuple =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase , self.tool.outputs ):
SCREAMING_SNAKE_CASE_: int =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for _input, input_type in zip(lowerCAmelCase , self.tool.inputs ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
SCREAMING_SNAKE_CASE_: Dict =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
def solution ( max_perimeter = 10**9 ):
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
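# Editor's sketch: the only qualifying perimeters not exceeding 100 are
# 16 (triangle 5, 5, 6) and 50 (triangle 17, 17, 16), so solution(100) is 66.
def _solution_sanity_check():
    assert solution(100) == 66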
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays ( numsa , numsb ):
    all_numbers = sorted(numsa + numsb )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input("""Enter the elements of first array: """).split()]
    array_b = [float(x) for x in input("""Enter the elements of second array: """).split()]
    print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_b)}""")
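# Quick sanity check (editor's sketch): the merged array [1, 2, 3] has odd
# length, so the median is its middle element.
def _median_sanity_check():
    assert median_of_two_arrays([1, 3], [2]) == 2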
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def add_newline_to_end_of_each_sentence ( x ):
    x = re.sub("""<n>""" , """""" , x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_save_load_fast_init_from_base(self):
        pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
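
# Usage sketch (hedged): resolve a backend by name instead of auto-detecting.
# backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend("optuna")]()
# backend.ensure_available()  # raises with install instructions if optuna is missing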
"""simple docstring"""
from math import pi
def arc_length(angle: float, radius: float) -> float:
    return 2 * pi * radius * (angle / 360)
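
# Worked example: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ~= 15.708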
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2383808.2) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2373516.75) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2347693.5) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice_ = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice_).max() < 1e-2
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n
def solution(n: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, n)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
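
# Amicable pairs (cf. Project Euler 21): i is amicable when d(d(i)) == i and
# d(i) != i, where d is the proper-divisor sum computed by sum_of_divisors above.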
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
"""simple docstring"""
def solution(n: int = 2000000) -> int:
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
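
# Sieve of Eratosthenes: entries left at 0 are prime; the multiples of each
# prime, starting at i * i, are marked 1 (composite) before primes are summed.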
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
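
# Usage sketch (hedged; paths are illustrative):
# python extract.py --model_type bert --model_name bert-base-uncased \
#     --dump_checkpoint serialization_dir/tf_bert-base-uncased_0247911.pth --vocab_transform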
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
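
# Minimal usage sketch (hedged; "google/owlvit-base-patch32" is a commonly used
# zero-shot object-detection checkpoint, not something this file pins down):
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote control"],
# )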
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
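
# (input_1, input_2).count(0) == 0 holds only when neither input is 0, so this
# is equivalent to int(bool(input_1) and bool(input_2)) for 0/1 inputs.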
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Return the name of the week day for a given Gregorian date."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"

    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is NOT a leap year when it is not divisible by 4, or when it is a
    # century year (centurian == 0) not divisible by 400.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
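
# Spot checks against the Gregorian calendar:
# get_week_day(2000, 1, 1) == "Saturday"; get_week_day(2020, 1, 1) == "Wednesday".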
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    # The target attribute names (conv_pre, upsampler, resblocks, conv_post) are a
    # reconstruction following SpeechT5HifiGan's module naming in `transformers`.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
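
# Note: apply_weight_norm()/remove_weight_norm() bracket the copy so that the
# weight_g/weight_v parametrization exists while the raw g/v tensors are assigned.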
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
_UpperCAmelCase = logging.get_logger(__name__)
class a ( _UpperCamelCase ):
def __init__( self : Any , **lowerCAmelCase : Optional[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""bs4"""] )
super().__init__(**_UpperCAmelCase )
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =[]
SCREAMING_SNAKE_CASE_: Dict =[]
SCREAMING_SNAKE_CASE_: int =element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
SCREAMING_SNAKE_CASE_: Dict =parent.find_all(child.name , recursive=_UpperCAmelCase )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_UpperCAmelCase ) else next(i for i, s in enumerate(_UpperCAmelCase , 1 ) if s is child ) )
SCREAMING_SNAKE_CASE_: Any =parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =BeautifulSoup(_UpperCAmelCase , """html.parser""" )
SCREAMING_SNAKE_CASE_: Any =[]
SCREAMING_SNAKE_CASE_: Dict =[]
SCREAMING_SNAKE_CASE_: Optional[int] =[]
for element in html_code.descendants:
if type(_UpperCAmelCase ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
SCREAMING_SNAKE_CASE_: Dict =html.unescape(_UpperCAmelCase ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =self.xpath_soup(_UpperCAmelCase )
stringaxtag_seq.append(_UpperCAmelCase )
stringaxsubs_seq.append(_UpperCAmelCase )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError("""Number of doc strings and xtags does not correspond""" )
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
raise ValueError("""Number of doc strings and xsubs does not correspond""" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =''''''
for tagname, subs in zip(_UpperCAmelCase , _UpperCAmelCase ):
xpath += f'''/{tagname}'''
if subs != 0:
xpath += f'''[{subs}]'''
return xpath
    def __call__(self, html_strings) -> BatchFeature:
        '''simple docstring'''
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                """HTML strings must be of type `str`, `List[str]` (batch of examples), """
                f'''but is of type {type(html_strings)}.'''
            )
        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, stringaxtag_seq, stringaxsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)
        # return as Dict
        data = {"""nodes""": nodes, """xpaths""": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
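# Minimal usage sketch for the feature extractor above (assumes bs4 is installed;
# the shown outputs are illustrative, not verified against a specific bs4 version):
#   fe = MarkupLMFeatureExtractor()
#   enc = fe("""<html><body><p>Hello <b>world</b></p></body></html>""")
#   enc["nodes"]   # [['Hello', 'world']]
#   enc["xpaths"]  # [['/html/body/p', '/html/body/p/b']]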
| 712
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("""cls_token""", """vit.embeddings.cls_token""")
    if "mask_token" in name:
        name = name.replace("""mask_token""", """decoder.mask_token""")
    if "decoder_pos_embed" in name:
        name = name.replace("""decoder_pos_embed""", """decoder.decoder_pos_embed""")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("""pos_embed""", """vit.embeddings.position_embeddings""")
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""", """vit.embeddings.patch_embeddings.projection""")
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""", """vit.embeddings.norm""")
    if "decoder_blocks" in name:
        name = name.replace("""decoder_blocks""", """decoder.decoder_layers""")
    if "blocks" in name:
        name = name.replace("""blocks""", """vit.encoder.layer""")
    if "attn.proj" in name:
        name = name.replace("""attn.proj""", """attention.output.dense""")
    if "attn" in name:
        name = name.replace("""attn""", """attention.self""")
    if "norm1" in name:
        name = name.replace("""norm1""", """layernorm_before""")
    if "norm2" in name:
        name = name.replace("""norm2""", """layernorm_after""")
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""", """intermediate.dense""")
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""", """output.dense""")
    if "decoder_embed" in name:
        name = name.replace("""decoder_embed""", """decoder.decoder_embed""")
    if "decoder_norm" in name:
        name = name.replace("""decoder_norm""", """decoder.decoder_norm""")
    if "decoder_pred" in name:
        name = name.replace("""decoder_pred""", """decoder.decoder_pred""")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("""norm.weight""", """vit.layernorm.weight""")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("""norm.bias""", """vit.layernorm.bias""")
    return name
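# Example of the mapping above on a hypothetical key from an original MAE checkpoint:
#   "blocks.0.attn.proj.weight" -> "vit.encoder.layer.0.attention.output.dense.weight"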
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(""".""")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = """decoder.decoder_layers."""
                if "weight" in key:
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.key.weight'''] = val[dim : dim * 2, :]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.query.bias'''] = val[:dim]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.key.bias'''] = val[dim : dim * 2]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.value.bias'''] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = """vit.encoder.layer."""
                if "weight" in key:
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.query.weight'''] = val[:dim, :]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.key.weight'''] = val[dim : dim * 2, :]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.value.weight'''] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.query.bias'''] = val[:dim]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.key.bias'''] = val[dim : dim * 2]
                    orig_state_dict[f'''{prefix}{layer_num}.attention.attention.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
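# Illustration of the qkv split above: the original MAE checkpoint stores the attention
# projections as one fused matrix of shape (3 * dim, dim), while the HF model expects
# separate query/key/value tensors. A minimal self-check (hypothetical dim):
def _demo_qkv_split(dim=4):
    fused = torch.randn(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    # the three slices recompose the fused matrix exactly
    assert torch.equal(torch.cat([q, k, v]), fused)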
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")["""model"""]
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    url = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"""
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="""pt""")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]])
    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 36
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_UpperCAmelCase = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""LayoutLMv3TokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""LayoutLMv3FeatureExtractor"""]
_UpperCAmelCase = ["""LayoutLMv3ImageProcessor"""]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
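# The init file above follows the transformers lazy-import pattern: _import_structure
# maps each submodule to the public names it defines, and _LazyModule defers the real
# imports until an attribute is first touched. A stripped-down sketch of the idea
# (not the actual transformers implementation):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # symbol name -> submodule that defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr):
        # only now is the defining submodule actually imported
        submodule = importlib.import_module("." + self._symbol_to_module[attr], self.__name__)
        return getattr(submodule, attr)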
| 713
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
| 0
|
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="""<s>""",
        pad="""<pad>""",
        eos="""</s>""",
        unk="""<unk>""",
        extra_special_symbols=None,
    ) -> None:
        '''simple docstring'''
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
def __eq__( self : Union[str, Any] , lowerCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
return self.indices == other.indices
def __getitem__( self : List[Any] , lowerCAmelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return len(self.symbols )
def __contains__( self : Dict , lowerCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
return sym in self.indices
@classmethod
    def load(cls, f):
        '''simple docstring'''
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        '''simple docstring'''
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def _load_meta(self, lines):
        '''simple docstring'''
        return 0
    def add_from_file(self, f):
        '''simple docstring'''
        if isinstance(f, str):
            try:
                with open(f, """r""", encoding="""utf-8""") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("""Incorrect encoding detected in {}, please rebuild the dataset""".format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(""" """, 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(""" """, 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        """Duplicate word found when loading Dictionary: '{}'. """
                        """Duplicate words can overwrite earlier ones by adding the """
                        """#fairseq:overwrite flag at the end of the corresponding row """
                        """in the dictionary file. If using the Camembert model, please """
                        """download an updated copy of the model file.""".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("""Incorrect dictionary format, expected '<token> <cnt> [flags]'""")
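# The dict.txt format parsed by add_from_file is one "<token> <count>" pair per line,
# optionally followed by the #fairseq:overwrite flag, e.g. (hypothetical contents):
#   the 1234567
#   of 987654
#   madeupword0000 0 #fairseq:overwrite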
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict(
        (re.sub(R"""@@$""", """""", k), v) if k.endswith("""@@""") else (re.sub(R"""$""", """</w>""", k), v)
        for k, v in d.items()
    )
    keep_keys = """<s> <pad> </s> <unk>""".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f'''{k}</w>''']
        da[k] = d[k]  # restore
    return da
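# Quick check of rewrite_dict_keys on a toy fairseq-style vocab (hypothetical counts):
#   {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7}
#   -> {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 5, "er</w>": 7}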
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f'''path {biogpt_checkpoint_path} does not exist!''')
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f'''Writing results to {pytorch_dump_folder_path}''')
    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, """checkpoint.pt""")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f'''path to the file {checkpoint_file} does not exist!''')
    chkpt = torch.load(checkpoint_file, map_location="""cpu""")
    args = chkpt["""cfg"""]["""model"""]
    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, """dict.txt""")
    if not os.path.isfile(dict_file):
        raise ValueError(f'''path to the file {dict_file} does not exist!''')
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["""vocab_file"""])
    print(f'''Generating {src_vocab_file} of {src_vocab_size} records''')
    with open(src_vocab_file, """w""", encoding="""utf-8""") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, """bpecodes""")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f'''path to the file {bpecodes_file} does not exist!''')
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["""merges_file"""])
    shutil.copyfile(bpecodes_file, merges_file)
    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, """config.json""")
    model_conf = {
        """activation_dropout""": args["""activation_dropout"""],
        """architectures""": ["""BioGptForCausalLM"""],
        """attention_probs_dropout_prob""": args["""attention_dropout"""],
        """bos_token_id""": 0,
        """eos_token_id""": 2,
        """hidden_act""": args["""activation_fn"""],
        """hidden_dropout_prob""": args["""dropout"""],
        """hidden_size""": args["""decoder_embed_dim"""],
        """initializer_range""": 0.02,
        """intermediate_size""": args["""decoder_ffn_embed_dim"""],
        """layer_norm_eps""": 1e-12,
        """layerdrop""": args["""decoder_layerdrop"""],
        """max_position_embeddings""": args["""max_target_positions"""],
        """model_type""": """biogpt""",
        """num_attention_heads""": args["""decoder_attention_heads"""],
        """num_hidden_layers""": args["""decoder_layers"""],
        """pad_token_id""": 1,
        """scale_embedding""": not args["""no_scale_embedding"""],
        """tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
        """vocab_size""": src_vocab_size,
    }
    # good hparam defaults to start with
    print(f'''Generating {biogpt_model_config_file}''')
    with open(biogpt_model_config_file, """w""", encoding="""utf-8""") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        """bos_token""": """<s>""",
        """eos_token""": """</s>""",
        """model_max_length""": 1024,
        """pad_token""": """<pad>""",
        """special_tokens_map_file""": None,
        """tokenizer_class""": """BioGptTokenizer""",
        """unk_token""": """<unk>""",
    }
    print(f'''Generating {biogpt_tokenizer_config_file}''')
    with open(biogpt_tokenizer_config_file, """w""", encoding="""utf-8""") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
    # model
    model_state_dict = chkpt["""model"""]
    # remove unneeded keys
    ignore_keys = [
        """decoder.version""",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("""output_projection.weight"""):
            model_state_dict[layer_name.replace("""decoder.""", """""")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("""decoder""", """biogpt""")] = model_state_dict.pop(layer_name)
    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)
    # check that it loads ok
    model_new.load_state_dict(model_state_dict)
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f'''Generating {pytorch_weights_dump_path}''')
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print("""Conversion is done!""")
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_UpperCAmelCase = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 714
|
"""simple docstring"""
def odd_even_sort(input_list):
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
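# Worked example of the passes above (odd-even / brick sort alternates even- and
# odd-indexed comparisons until a full sweep makes no swap):
#   [3, 1, 4, 2] -- even pass --> [1, 3, 2, 4] -- odd pass --> [1, 2, 3, 4]
#   odd_even_sort([3, 1, 4, 2]) == [1, 2, 3, 4]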
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase = [int(x) for x in input().split()]
    # inputting elements of the list in one line
_UpperCAmelCase = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
| 36
| 0
|
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, -len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]
    return out_tensor.tolist()
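# Quick illustration (hypothetical toy inputs): ragged label lists right-padded with
# the ignore index -1 up to sequence_length 4, as the collator below does for NER tags:
#   padding_tensor([[1, 2], [3]], -1, "right", 4)
#   -> [[1, 2, -1, -1], [3, -1, -1, -1]]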
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("""P"""):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call(self, features):
        '''simple docstring'''
        import torch

        label_name = """label""" if """label""" in features[0].keys() else """labels"""
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="""pt""" if labels is None else None, )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["""entity_ids"""]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["""ner_tags"""] for feature in features]
        batch["""ner_tags"""] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["""original_entity_spans"""] for feature in features]
        batch["""original_entity_spans"""] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
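# Note on the tuple padding value used above: padding_tensor(spans, (-1, -1), ...) pads
# 2-D span lists into a (batch, sequence_length, 2) array, e.g. (hypothetical spans):
#   padding_tensor([[(0, 5)]], (-1, -1), "right", 2)
#   -> [[[0, 5], [-1, -1]]]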
| 715
|
"""simple docstring"""
def is_palindrome(n):
    return str(n) == str(n)[::-1]
def sum_reverse(n):
    return int(n) + int(str(n)[::-1])
def solution(limit=1_0000):
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
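# Worked example from the problem statement: 349 needs three iterations,
#   349 + 943 = 1292;  1292 + 2921 = 4213;  4213 + 3124 = 7337 (a palindrome),
# so 349 is not Lychrel, while 196 is thought never to produce a palindrome.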
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 0
|
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="""gelu""",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        '''simple docstring'''
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileBertModel,
            'fill-mask': MobileBertForMaskedLM,
            'question-answering': MobileBertForQuestionAnswering,
            'text-classification': MobileBertForSequenceClassification,
            'token-classification': MobileBertForTokenClassification,
            'zero-shot': MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        '''simple docstring'''
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def __magic_name__ ( lowercase ):
return torch.tensor(
lowercase , dtype=torch.long , device=lowercase , )
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
@slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = MobileBertModel.from_pretrained("""google/mobilebert-uncased""").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4_736_526E07, 8.2_691_656E04, 1.6_521_838E05],
                    [-5.7_541_704E-01, 3.9_056_022E00, 4.4_011_507E00],
                    [2.6_047_359E00, 1.5_677_652E00, -1.7_324_188E-01],
                ]
            ], device=torch_device, )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
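# Why the ratio check above instead of torch.allclose: the activations span roughly
# 1e0 to 1e8, so no single absolute tolerance is meaningful. A minimal illustration
# (hypothetical values):
def _demo_relative_tolerance():
    expected = torch.tensor([1.0e8, 2.0])
    observed = torch.tensor([1.0001e8, 2.0001])
    # the first pair is off by 1e4 in absolute terms, so an absolute check fails
    assert not torch.allclose(expected, observed, atol=1e-3, rtol=0.0)
    # both pairs agree to within 0.1% relatively, so the ratio check passes
    assert torch.all(((expected / observed) - 1).abs() < 1e-3)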
| 716
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""DPTFeatureExtractor"""]
_UpperCAmelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 36
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 717
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        '''simple docstring'''
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0
    def is_empty(self) -> bool:
        '''simple docstring'''
        return self.head == self.tail
    def push(self, data) -> None:
        '''simple docstring'''
        self.data.append(data)
        self.tail = self.tail + 1
    def pop(self) -> Any:
        '''simple docstring'''
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret
    def count(self) -> int:
        '''simple docstring'''
        return self.tail - self.head
    def print_queue(self) -> None:
        '''simple docstring'''
        print(self.data)
        print("""**************""")
        print(self.data[self.head : self.tail])
class MyNode:
    def __init__(self, data) -> None:
        '''simple docstring'''
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1
    def get_data(self) -> Any:
        '''simple docstring'''
        return self.data
    def get_left(self) -> MyNode | None:
        '''simple docstring'''
        return self.left
    def get_right(self) -> MyNode | None:
        '''simple docstring'''
        return self.right
    def get_height(self) -> int:
        '''simple docstring'''
        return self.height
    def set_data(self, data) -> None:
        '''simple docstring'''
        self.data = data
    def set_left(self, node) -> None:
        '''simple docstring'''
        self.left = node
    def set_right(self, node) -> None:
        '''simple docstring'''
        self.right = node
    def set_height(self, height) -> None:
        '''simple docstring'''
        self.height = height
def get_height(node):
    if node is None:
        return 0
    return node.get_height()
def my_max(a, b):
    if a > b:
        return a
    return b
def left_rotation(node):
    print("""left rotation node:""", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def right_rotation(node):
    print("""right rotation node:""", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node):
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)
def rl_rotation(node):
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
def insert_node(node, data):
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
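# A small self-check of the invariant that insert_node maintains: after every insertion
# the heights of any node's two subtrees differ by at most 1 (the AVL balance condition).
def assert_balanced(node):
    if node is None:
        return
    assert abs(get_height(node.get_left()) - get_height(node.get_right())) <= 1
    assert_balanced(node.get_left())
    assert_balanced(node.get_right())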
def get_right_most(root):
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most(root):
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root, data):
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("""No such data""")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    h = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(h)
    return root
class AVLtree:
    def __init__(self) -> None:
        '''simple docstring'''
        self.root: MyNode | None = None
    def get_height(self) -> int:
        '''simple docstring'''
        return get_height(self.root)
    def insert(self, data) -> None:
        '''simple docstring'''
        print("""insert:""" + str(data))
        self.root = insert_node(self.root, data)
    def del_node(self, data) -> None:
        '''simple docstring'''
        print("""delete:""" + str(data))
        if self.root is None:
            print("""Tree is empty!""")
            return
        self.root = del_node(self.root, data)
    def __str__(self) -> str:  # a level traversal, gives a more intuitive look on the tree
        '''simple docstring'''
        output = """"""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = """ """ * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
    t = AVLtree()
    lst = list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
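# A self-contained sketch of the level-order printing idea used in __str__
# above, using plain (value, left, right) tuples instead of the MyNode class
# (the helper and tree below are illustrative only):
from collections import deque

def level_order_lines(root):
    lines, q = [], deque([root])
    while any(n is not None for n in q):
        level = []
        for _ in range(len(q)):
            node = q.popleft()
            if node is None:
                level.append("*")  # placeholder for a missing child, as in __str__
                q.extend([None, None])
            else:
                value, left, right = node
                level.append(str(value))
                q.extend([left, right])
        lines.append(" ".join(level))
    return "\n".join(lines)

example_tree = (2, (1, None, None), (3, None, (4, None, None)))
# print(level_order_lines(example_tree))  # -> "2\n1 3\n* * * 4"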
| 36
| 0
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowerCAmelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class a :
UpperCamelCase : Tuple = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
UpperCamelCase : Union[str, Any] = field(
default=UpperCamelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
UpperCamelCase : Tuple = field(
default=UpperCamelCase_ , metadata={'help': 'The column name of the images in the files.'} )
UpperCamelCase : List[Any] = field(default=UpperCamelCase_ , metadata={'help': 'A folder containing the training data.'} )
UpperCamelCase : List[str] = field(default=UpperCamelCase_ , metadata={'help': 'A folder containing the validation data.'} )
UpperCamelCase : Union[str, Any] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
UpperCamelCase : List[Any] = field(
default=UpperCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
UpperCamelCase : Optional[int] = field(
default=UpperCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] ={}
if self.train_dir is not None:
SCREAMING_SNAKE_CASE_: List[str] =self.train_dir
if self.validation_dir is not None:
SCREAMING_SNAKE_CASE_: str =self.validation_dir
SCREAMING_SNAKE_CASE_: Union[str, Any] =data_files if data_files else None
@dataclass
class a :
UpperCamelCase : Optional[int] = field(
default=UpperCamelCase_ , metadata={
'help': (
            'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
)
} , )
UpperCamelCase : List[str] = field(
default=UpperCamelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
UpperCamelCase : str = field(
default=UpperCamelCase_ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
UpperCamelCase : Union[str, Any] = field(
default=UpperCamelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
UpperCamelCase : Optional[Any] = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
UpperCamelCase : List[str] = field(default=UpperCamelCase_ , metadata={'help': 'Name or path of preprocessor config.'} )
UpperCamelCase : Any = field(
default=UpperCamelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
UpperCamelCase : Optional[int] = field(
default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
UpperCamelCase : Tuple = field(
default=UpperCamelCase_ , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class a ( UpperCamelCase_ ):
UpperCamelCase : List[Any] = field(
default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def __magic_name__ ( examples ):
    pixel_values =torch.stack([example["""pixel_values"""] for example in examples] )
    return {"pixel_values": pixel_values}
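# A quick shape sketch for the collator above (comment-only; illustrative
# values, and upstream this function is named collate_fn and passed to the
# Trainer below as data_collator):
# examples = [{"pixel_values": torch.rand(3, 224, 224)} for _ in range(2)]
# collate_fn(examples)["pixel_values"].shape  # -> torch.Size([2, 3, 224, 224])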
def __magic_name__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser =HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_mae""" , model_args , data_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level =training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
    ds =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split =None if '''validation''' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split =ds['''train'''].train_test_split(data_args.train_val_split )
        ds['''train'''] =split['''train''']
        ds['''validation'''] =split['''test''']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs ={
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
    if model_args.config_name:
        config =ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config =ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config =ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor =ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor =ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor =ViTImageProcessor()
# create model
    if model_args.model_name_or_path:
        model =ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        model =ViTMAEForPreTraining(config )
    if training_args.do_train:
        column_names =ds['''train'''].column_names
    else:
        column_names =ds['''validation'''].column_names
    if data_args.image_column_name is not None:
        image_column_name =data_args.image_column_name
    elif "image" in column_names:
        image_column_name ='''image'''
    elif "img" in column_names:
        image_column_name ='''img'''
    else:
        image_column_name =column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
SCREAMING_SNAKE_CASE_: Optional[int] =image_processor.size['''shortest_edge''']
else:
SCREAMING_SNAKE_CASE_: Union[str, Any] =(image_processor.size['''height'''], image_processor.size['''width'''])
SCREAMING_SNAKE_CASE_: Union[str, Any] =Compose(
[
Lambda(lambda lowercase : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(snake_case_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
    def preprocess_images(examples ):
        examples["""pixel_values"""] =[transforms(image ) for image in examples[image_column_name]]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            ds['''train'''] =ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            ds['''validation'''] =(
                ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Compute absolute learning rate
    total_train_batch_size =(
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate =training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
    trainer =Trainer(
        model=model , args=training_args , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
    if training_args.do_train:
        checkpoint =None
        if training_args.resume_from_checkpoint is not None:
            checkpoint =training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint =last_checkpoint
        train_result =trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics("""train""" , train_result.metrics )
        trainer.save_metrics("""train""" , train_result.metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics =trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Write model card and (optionally) push to hub
    kwargs ={
        '''tasks''': '''masked-auto-encoding''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''masked-auto-encoding'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def __magic_name__ ( lowercase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
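# Hypothetical invocation (a comment-only sketch; the flags mirror the
# dataclass fields defined above plus standard TrainingArguments, and the
# script name, paths, and values are placeholders):
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 \
#       --norm_pix_loss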
| 718
|
"""simple docstring"""
import string
def __magic_name__ ( sequence ):
    output =""""""
    for i in sequence:
        extract =ord(i )
        if 65 <= extract <= 90:
            output += chr(155 - extract )
        elif 97 <= extract <= 122:
            output += chr(219 - extract )
        else:
            output += i
    return output
def __magic_name__ ( sequence ):
    letters =string.ascii_letters
    letters_reversed =string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c )] if c in letters else c for c in sequence )
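# An equivalent sketch built once with str.maketrans (same Atbash mapping as
# the functions above; the name atbash_translate is illustrative):
_ATBASH_TABLE = str.maketrans(
    string.ascii_letters, string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
)

def atbash_translate(sequence: str) -> str:
    return sequence.translate(_ATBASH_TABLE)

assert atbash_translate("ABCDEFGH") == "ZYXWVUTS"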
def __magic_name__ ( ):
from timeit import timeit
print("""Running performance benchmarks...""" )
SCREAMING_SNAKE_CASE_: int ="""from string import printable ; from __main__ import atbash, atbash_slow"""
print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowercase )} seconds''' )
print(f'''> atbash(): {timeit("atbash(printable)" , setup=lowercase )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 36
| 0
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a ( lowercase_ , unittest.TestCase ):
UpperCamelCase : List[Any] = TransfoXLTokenizer
UpperCamelCase : List[Any] = False
UpperCamelCase : Union[str, Any] = False
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
super().setUp()
        vocab_tokens =[
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
        self.vocab_file =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowerCamelCase__ ( self : Any , **lowerCAmelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
        lowerCAmelCase["""lower_case"""] =True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict ="""<unk> UNwanted , running"""
SCREAMING_SNAKE_CASE_: str ="""<unk> unwanted, running"""
return input_text, output_text
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
        tokenizer =TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
        tokens =tokenizer.tokenize("""<unk> UNwanted , running""" )
        self.assertListEqual(tokens , ["""<unk>""", """unwanted""", """,""", """running"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
        tokenizer =TransfoXLTokenizer(lower_case=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
def lowerCamelCase__ ( self : Dict ) -> int:
'''simple docstring'''
        tokenizer =TransfoXLTokenizer(lower_case=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self : Any ) -> str:
'''simple docstring'''
        tokenizer =TransfoXLTokenizer(lower_case=False )
        text_in ="""Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?"""
        tokens_out =[
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""\'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""\'s""",
"""up""",
"""!""",
"""?""",
]
        self.assertListEqual(tokenizer.tokenize(text_in ) , tokens_out )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out ) , text_in )
def lowerCamelCase__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
        tokenizer =self.get_tokenizer()
        original_len =len(tokenizer )
tokenizer.add_tokens(["""new1""", """new2"""] )
tokenizer.move_added_token("""new1""" , 1 )
# Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("""new1""" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , """new1""" )
| 719
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=3 , lowerCAmelCase : Optional[Any]=64 , lowerCAmelCase : Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =np.random.default_rng(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =length
SCREAMING_SNAKE_CASE_: Union[str, Any] =rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_: Tuple =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ) -> str:
'''simple docstring'''
return self.length
def __getitem__( self : Union[str, Any] , lowerCAmelCase : Any ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : str=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Optional[int]=False ) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: Dict =True
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tuple=None ) -> int:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
def __init__( self : Union[str, Any] , lowerCAmelCase : Any=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : List[Any]=False ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[str] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: List[Any] =True
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : int=None ) -> Any:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Optional[int] =False
return x * self.a + self.b
def __magic_name__ ( accelerator , batch_size = 16 ):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer =AutoTokenizer.from_pretrained("""bert-base-cased""" )
    data_files ={"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
    datasets =load_dataset("""csv""" , data_files=data_files )
    label_list =datasets["""train"""].unique("""label""" )
    label_to_id ={v: i for i, v in enumerate(label_list )}

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs =tokenizer(
            examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None , padding="""max_length""" )
        if "label" in examples:
            outputs["""labels"""] =[label_to_id[l] for l in examples["""label"""]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets =datasets.map(
        tokenize_function , batched=True , remove_columns=["""sentence1""", """sentence2""", """label"""] , )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )

    # Instantiate dataloaders.
    train_dataloader =DataLoader(tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader =DataLoader(tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader
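# Hypothetical usage (a comment-only sketch; assumes this helper keeps its
# upstream name get_dataloaders and that the MRPC sample CSVs referenced
# above exist on disk):
# from accelerate import Accelerator
# accelerator = Accelerator()
# train_dataloader, eval_dataloader = get_dataloaders(accelerator)
# batch = next(iter(train_dataloader))
# print(batch["input_ids"].shape)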
| 36
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def __magic_name__ ( force , area , distance ):
    if (force, area, distance).count(0 ) != 1:
        raise ValueError("""One and only one argument must be 0""" )
    if force < 0:
        raise ValueError("""Magnitude of force can not be negative""" )
    if distance < 0:
        raise ValueError("""Distance can not be negative""" )
    if area < 0:
        raise ValueError("""Area can not be negative""" )
    if force == 0:
        force =(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area =(240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance =(
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("""One and only one argument must be 0""" )
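# A quick worked check (a sketch inlining the formula above; the helper name
# is illustrative): two plates of area 4 cm^2 (4e-4 m^2) separated by
# 1 micrometre attract with roughly 5.2e-7 N.
def _casimir_example() -> float:
    area, distance = 4e-4, 1e-6  # plate area in m^2, separation in m
    return (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * distance**4)  # ~5.2e-07 N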
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720
|
"""simple docstring"""
def __magic_name__ ( upper_limit ):
    if upper_limit < 0:
        raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
    catalan_list =[0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] =1
    if upper_limit > 0:
        catalan_list[1] =1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
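# A cross-check sketch against the closed form C(n) = comb(2n, n) // (n + 1)
# (math.comb needs Python 3.8+; the helper name is illustrative):
def _catalan_closed_form(n: int) -> int:
    from math import comb
    return comb(2 * n, n) // (n + 1)

# [_catalan_closed_form(i) for i in range(6)] == [1, 1, 2, 5, 14, 42]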
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
_UpperCAmelCase = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 36
| 0
|
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a ( snake_case__ ):
UpperCamelCase : Tuple = (IPNDMScheduler,)
UpperCamelCase : str = (('num_inference_steps', 5_0),)
def lowerCamelCase__ ( self : Any , **lowerCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] ={"""num_train_timesteps""": 1000}
config.update(**_SCREAMING_SNAKE_CASE )
return config
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Optional[int]=0 , **lowerCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_: Tuple =kwargs.pop("""num_inference_steps""" , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_: List[Any] =self.dummy_sample
SCREAMING_SNAKE_CASE_: Union[str, Any] =0.1 * sample
SCREAMING_SNAKE_CASE_: Dict =[residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_: Any =self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_: List[str] =scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE_: List[str] =dummy_past_residuals[:]
if time_step is None:
SCREAMING_SNAKE_CASE_: Optional[int] =scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_: Union[str, Any] =scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE_: List[Any] =dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_: Dict =scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE_: Union[str, Any] =new_scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE_: Dict =scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE_: List[Any] =new_scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : str , lowerCAmelCase : str=0 , **lowerCAmelCase : str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_: Optional[Any] =kwargs.pop("""num_inference_steps""" , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_: Tuple =self.dummy_sample
SCREAMING_SNAKE_CASE_: List[Any] =0.1 * sample
SCREAMING_SNAKE_CASE_: int =[residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_: Optional[int] =self.get_scheduler_config()
SCREAMING_SNAKE_CASE_: Tuple =scheduler_class(**_SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE_: Optional[int] =dummy_past_residuals[:]
if time_step is None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_: int =scheduler_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# copy over dummy past residuals
new_scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE_: int =dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_: str =scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE_: Union[str, Any] =new_scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
SCREAMING_SNAKE_CASE_: Optional[Any] =scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE_: Union[str, Any] =new_scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def lowerCamelCase__ ( self : Union[str, Any] , **lowerCAmelCase : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_: Dict =self.get_scheduler_config(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_: Tuple =scheduler_class(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_: Any =10
SCREAMING_SNAKE_CASE_: Optional[Any] =self.dummy_model()
SCREAMING_SNAKE_CASE_: Optional[Any] =self.dummy_sample_deter
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_: Optional[int] =scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE_: Tuple =model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_: List[Any] =scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).prev_sample
return sample
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE_: List[Any] =kwargs.pop("""num_inference_steps""" , _SCREAMING_SNAKE_CASE )
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE_: Tuple =self.get_scheduler_config()
SCREAMING_SNAKE_CASE_: str =scheduler_class(**_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_: str =self.dummy_sample
SCREAMING_SNAKE_CASE_: Tuple =0.1 * sample
if num_inference_steps is not None and hasattr(_SCREAMING_SNAKE_CASE , """set_timesteps""" ):
scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
elif num_inference_steps is not None and not hasattr(_SCREAMING_SNAKE_CASE , """set_timesteps""" ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
SCREAMING_SNAKE_CASE_: Dict =[residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
SCREAMING_SNAKE_CASE_: Any =dummy_past_residuals[:]
SCREAMING_SNAKE_CASE_: int =scheduler.timesteps[5]
SCREAMING_SNAKE_CASE_: Tuple =scheduler.timesteps[6]
SCREAMING_SNAKE_CASE_: int =scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE_: Tuple =scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
SCREAMING_SNAKE_CASE_: int =scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
SCREAMING_SNAKE_CASE_: str =scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE , time_step=_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=_SCREAMING_SNAKE_CASE , time_step=_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.full_loop()
SCREAMING_SNAKE_CASE_: Optional[int] =torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_mean.item() - 254_0529 ) < 10
| 721
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_UpperCAmelCase = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'albert'
    def __init__( self , vocab_size=3_0000 , embedding_size=128 , hidden_size=4096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=1_6384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size =vocab_size
        self.embedding_size =embedding_size
        self.hidden_size =hidden_size
        self.num_hidden_layers =num_hidden_layers
        self.num_hidden_groups =num_hidden_groups
        self.num_attention_heads =num_attention_heads
        self.inner_group_num =inner_group_num
        self.hidden_act =hidden_act
        self.intermediate_size =intermediate_size
        self.hidden_dropout_prob =hidden_dropout_prob
        self.attention_probs_dropout_prob =attention_probs_dropout_prob
        self.max_position_embeddings =max_position_embeddings
        self.type_vocab_size =type_vocab_size
        self.initializer_range =initializer_range
        self.layer_norm_eps =layer_norm_eps
        self.classifier_dropout_prob =classifier_dropout_prob
        self.position_embedding_type =position_embedding_type
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
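# A minimal usage sketch (comment-only; upstream these classes are
# AlbertConfig and AlbertOnnxConfig in transformers):
# config = AlbertConfig()                      # defaults above: 30000 vocab, 4096 hidden
# onnx_config = AlbertOnnxConfig(config, task="default")
# list(onnx_config.inputs)                     # ['input_ids', 'attention_mask', 'token_type_ids']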
| 36
| 0
|
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : List[str] = XLMRobertaTokenizer
UpperCamelCase : Any = XLMRobertaTokenizerFast
UpperCamelCase : Any = True
UpperCamelCase : Optional[Any] = True
def lowerCamelCase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer =XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str ="""<pad>"""
SCREAMING_SNAKE_CASE_: Any =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> Tuple:
'''simple docstring'''
        vocab_keys =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(vocab_keys ) , 1002 )
def lowerCamelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def lowerCamelCase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
        tokenizer =XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens =tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE_: List[Any] =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids =tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens =tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
SCREAMING_SNAKE_CASE_: Optional[int] =(self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE_: Tuple =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: Tuple =tokenizer_r.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =tokenizer_p.save_pretrained(lowerCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
SCREAMING_SNAKE_CASE_: List[Any] =tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(lowerCAmelCase , lowerCAmelCase )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_: List[str] =tokenizer_r.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =tokenizer_p.from_pretrained(lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase , lowerCAmelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase )
# Save tokenizer rust, legacy_format=True
SCREAMING_SNAKE_CASE_: Any =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: List[str] =tokenizer_r.save_pretrained(lowerCAmelCase , legacy_format=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =tokenizer_p.save_pretrained(lowerCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(lowerCAmelCase , lowerCAmelCase )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_: Dict =tokenizer_r.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =tokenizer_p.from_pretrained(lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase , lowerCAmelCase ) )
shutil.rmtree(lowerCAmelCase )
# Save tokenizer rust, legacy_format=False
SCREAMING_SNAKE_CASE_: Optional[int] =tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer_r.save_pretrained(lowerCAmelCase , legacy_format=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =tokenizer_p.save_pretrained(lowerCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer_r.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =tokenizer_p.from_pretrained(lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase , lowerCAmelCase ) )
shutil.rmtree(lowerCAmelCase )
@cached_property
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def lowerCamelCase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer =XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer =pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
        tokenizer =self.get_tokenizer()
        rust_tokenizer =self.get_rust_tokenizer()
        sequence ="""I was born in 92000, and this is falsé."""
        tokens =tokenizer.tokenize(sequence )
        rust_tokens =rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids =tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids =rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer =self.get_rust_tokenizer()
        ids =tokenizer.encode(sequence )
        rust_ids =rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def lowerCamelCase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple ="""Hello World!"""
SCREAMING_SNAKE_CASE_: Tuple =[0, 3_5378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
        symbols =(
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
        original_tokenizer_encodings =[
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple ={"""input_ids""": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 700
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a ( yaml.SafeLoader ):
    def lowerCamelCase__ ( self , node ) -> Optional[Any]:
        '''simple docstring'''
        keys =[self.constructed_objects[key_node] for key_node, _ in node.value]
        keys =[tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter =Counter(keys )
        duplicate_keys =[key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
    def lowerCamelCase__ ( self , node , deep=False ) -> int:
        '''simple docstring'''
        mapping =super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def __magic_name__ ( readme_content ):
    full_content =list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx =full_content[1:].index("""---""" ) + 1
        yamlblock ="""\n""".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
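# A tiny demo sketch of the split performed above (upstream this helper is
# named _split_yaml_from_readme):
# content = "---\nlicense: mit\n---\n# My dataset\nSome text"
# _split_yaml_from_readme(content)
# -> ("license: mit", "# My dataset\nSome text")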
class a ( UpperCAmelCase__ ):
# class attributes
UpperCamelCase : Tuple = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def lowerCamelCase__ ( cls : List[Any] , lowerCAmelCase : Path ) -> "DatasetMetadata":
'''simple docstring'''
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =_split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(lowerCAmelCase )
else:
return cls()
    def lowerCamelCase__ ( self , path : Path ) -> None:
        '''simple docstring'''
        if path.exists():
            with open(path , encoding="""utf-8""" ) as readme_file:
                readme_content =readme_file.read()
        else:
            readme_content =None
        full_content =self._to_readme(readme_content )
        with open(path , """w""" , encoding="""utf-8""" ) as readme_file:
            readme_file.write(full_content )
    def lowerCamelCase__ ( self , readme_content : Optional[str] = None ) -> str:
        '''simple docstring'''
        if readme_content is not None:
            _ , content =_split_yaml_from_readme(readme_content )
            full_content ="""---\n""" + self.to_yaml_string() + """---\n""" + content
        else:
            full_content ="""---\n""" + self.to_yaml_string() + """---\n"""
        return full_content
@classmethod
def lowerCamelCase__ ( cls : Optional[int] , lowerCAmelCase : str ) -> "DatasetMetadata":
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =yaml.load(lowerCAmelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_: List[Any] ={
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**lowerCAmelCase )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding="""utf-8""" , ).decode("""utf-8""" )
_UpperCAmelCase = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_UpperCAmelCase = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
_UpperCAmelCase = ap.parse_args()
_UpperCAmelCase = Path(args.readme_filepath)
_UpperCAmelCase = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 36
| 0
|
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
@register_to_config
    def __init__( self , max_length : int , vocab_size : int , d_model : int , dropout_rate : float , num_layers : int , num_heads : int , d_kv : int , d_ff : int , feed_forward_proj : str , is_decoder : bool = False , ) -> None:
        '''simple docstring'''
        super().__init__()
        self.token_embedder =nn.Embedding(vocab_size , d_model )
        self.position_encoding =nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad =False
        self.dropout_pre =nn.Dropout(p=dropout_rate )
        t5config =TaConfig(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders =nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr =TaBlock(t5config )
            self.encoders.append(lyr )
        self.layer_norm =TaLayerNorm(d_model )
        self.dropout_post =nn.Dropout(p=dropout_rate )
def lowerCamelCase__ ( self : int , lowerCAmelCase : int , lowerCAmelCase : Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =self.token_embedder(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =encoder_input_tokens.shape[1]
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.arange(lowerCAmelCase , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =self.dropout_pre(lowerCAmelCase )
# inverted the attention mask
SCREAMING_SNAKE_CASE_: List[Any] =encoder_input_tokens.size()
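        # get_extended_attention_mask (inherited from ModuleUtilsMixin) broadcasts
        # the (batch, seq_len) padding mask to (batch, 1, 1, seq_len) and fills the
        # masked positions with a large negative value, so it can be added directly
        # to the attention scores computed inside each TaBlock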
SCREAMING_SNAKE_CASE_: Dict =self.get_extended_attention_mask(lowerCAmelCase , lowerCAmelCase )
for lyr in self.encoders:
SCREAMING_SNAKE_CASE_: Dict =lyr(lowerCAmelCase , lowerCAmelCase )[0]
SCREAMING_SNAKE_CASE_: int =self.layer_norm(lowerCAmelCase )
return self.dropout_post(lowerCAmelCase ), encoder_inputs_mask
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data):
    return (data["data"], data["target"])


def xgboost(features, target):
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main():
    # Load the Iris dataset and split it into train / test sets
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
"""simple docstring"""
def decimal_to_binary(num):
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
SCREAMING_SNAKE_CASE_: List[str] =[]
SCREAMING_SNAKE_CASE_: Any =[]
for rt in rc.restypes:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
SCREAMING_SNAKE_CASE_: Any ={name: i for i, name in enumerate(lowercase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor(
lowercase , dtype=torch.floataa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Any =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: Tuple =residx_atomaa_mask
SCREAMING_SNAKE_CASE_: Dict =residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
SCREAMING_SNAKE_CASE_: Dict =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Optional[int] =residx_atomaa_to_atomaa.long()
# create the corresponding mask
SCREAMING_SNAKE_CASE_: Optional[int] =torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
SCREAMING_SNAKE_CASE_: int =rc.restype_atoa[restype_letter]
SCREAMING_SNAKE_CASE_: Any =rc.residue_atoms[restype_name]
for atom_name in atom_names:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.atom_order[atom_name]
SCREAMING_SNAKE_CASE_: Dict =1
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: List[Any] =residx_atomaa_mask
return protein
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =tree_map(lambda lowercase : torch.tensor(lowercase , device=batch["""aatype"""].device ) , lowercase , np.ndarray )
SCREAMING_SNAKE_CASE_: int =tensor_tree_map(lambda lowercase : np.array(lowercase ) , make_atomaa_masks(lowercase ) )
return out
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Dict = MgpstrTokenizer
UpperCamelCase : Dict = False
UpperCamelCase : Optional[int] = {}
UpperCamelCase : Optional[int] = False
def lowerCamelCase__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_: Optional[int] =["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
SCREAMING_SNAKE_CASE_: Optional[int] =dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
SCREAMING_SNAKE_CASE_: List[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase ) + """\n""" )
def lowerCamelCase__ ( self : Any , **lowerCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase )
def lowerCamelCase__ ( self : str , lowerCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any ="""tester"""
SCREAMING_SNAKE_CASE_: Tuple ="""tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def lowerCamelCase__ ( self : Tuple ) -> str:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.get_tokenizers(do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE_: int ="""[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase )
self.assertEqual(len(lowerCAmelCase ) , 1 )
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
SCREAMING_SNAKE_CASE_: List[Any] =self.get_input_output_texts(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =tokenizer.tokenize(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =tokenizer.convert_tokens_to_ids(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =tokenizer.convert_ids_to_tokens(lowerCAmelCase )
self.assertNotEqual(len(lowerCAmelCase ) , 0 )
SCREAMING_SNAKE_CASE_: Any =tokenizer.decode(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(text_a.replace(""" """ , """""" ) , lowerCAmelCase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def lowerCamelCase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
pass
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =[]
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(lowercase , lowercase ):
inputs.append(create_inputs(lowercase ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =[]
for output in outputs:
if isinstance(lowercase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(lowercase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(lowercase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
@is_tool_test
class a :
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] =self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
SCREAMING_SNAKE_CASE_: Any =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: List[Any] =self.tool(*lowerCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
SCREAMING_SNAKE_CASE_: str =[outputs]
self.assertListEqual(output_types(lowerCAmelCase ) , self.tool.outputs )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Tuple =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase , self.tool.outputs ):
SCREAMING_SNAKE_CASE_: int =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for _input, input_type in zip(lowerCAmelCase , self.tool.inputs ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
SCREAMING_SNAKE_CASE_: Dict =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
from manim import *
class a ( UpperCAmelCase__ ):
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE_: int =Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE_: Optional[Any] =[mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_: List[Any] =[mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_: Dict =VGroup(*lowerCAmelCase ).arrange(lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_: Dict =VGroup(*lowerCAmelCase ).arrange(lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_: Optional[int] =VGroup(lowerCAmelCase , lowerCAmelCase ).arrange(lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =Text("""CPU""" , font_size=24 )
SCREAMING_SNAKE_CASE_: str =Group(lowerCAmelCase , lowerCAmelCase ).arrange(lowerCAmelCase , buff=0.5 , aligned_edge=lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =[mem.copy() for i in range(1 )]
SCREAMING_SNAKE_CASE_: Dict =VGroup(*lowerCAmelCase ).arrange(lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_: Dict =Text("""GPU""" , font_size=24 )
SCREAMING_SNAKE_CASE_: str =Group(lowerCAmelCase , lowerCAmelCase ).arrange(lowerCAmelCase , buff=0.5 , aligned_edge=lowerCAmelCase )
gpu.align_to(lowerCAmelCase , lowerCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =[mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE_: Tuple =VGroup(*lowerCAmelCase ).arrange(lowerCAmelCase , buff=0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =Text("""Model""" , font_size=24 )
SCREAMING_SNAKE_CASE_: List[Any] =Group(lowerCAmelCase , lowerCAmelCase ).arrange(lowerCAmelCase , buff=0.5 , aligned_edge=lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCAmelCase , run_time=1 ) , Create(lowerCAmelCase , run_time=1 ) , Create(lowerCAmelCase , run_time=1 ) , )
SCREAMING_SNAKE_CASE_: List[Any] =MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
SCREAMING_SNAKE_CASE_: Union[str, Any] =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE_: Any =MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase , run_time=2.5 ) , Write(lowerCAmelCase ) , Write(lowerCAmelCase ) )
self.add(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =[]
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
SCREAMING_SNAKE_CASE_: List[Any] =[]
for i, rect in enumerate(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Dict =Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase , opacity=0.7 )
cpu_target.move_to(lowerCAmelCase )
cpu_target.generate_target()
SCREAMING_SNAKE_CASE_: str =0.4_6 / 4
SCREAMING_SNAKE_CASE_: Union[str, Any] =0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCAmelCase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCAmelCase , buff=0.0 )
cpu_targs.append(lowerCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCAmelCase ) )
second_animations.append(MoveToTarget(lowerCAmelCase , run_time=1.5 ) )
self.play(*lowerCAmelCase )
self.play(*lowerCAmelCase )
self.wait()
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums_a, nums_b):
    all_numbers = sorted(nums_a + nums_b)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
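
# worked example (a minimal sketch): merging [1, 3] and [2] gives [1, 2, 3],
# whose middle element is 2; an even-length merge averages the two middle values
assert median_of_two_arrays([1, 3], [2]) == 2
assert median_of_two_arrays([1, 2], [3, 4]) == 2.5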
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class a ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = 'mobilenet_v1'
def __init__( self : int , lowerCAmelCase : List[str]=3 , lowerCAmelCase : Dict=224 , lowerCAmelCase : Tuple=1.0 , lowerCAmelCase : int=8 , lowerCAmelCase : Any="relu6" , lowerCAmelCase : Any=True , lowerCAmelCase : List[str]=0.9_9_9 , lowerCAmelCase : Union[str, Any]=0.0_2 , lowerCAmelCase : Tuple=0.0_0_1 , **lowerCAmelCase : List[Any] , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
SCREAMING_SNAKE_CASE_: List[str] =num_channels
SCREAMING_SNAKE_CASE_: List[Any] =image_size
SCREAMING_SNAKE_CASE_: List[str] =depth_multiplier
SCREAMING_SNAKE_CASE_: Union[str, Any] =min_depth
SCREAMING_SNAKE_CASE_: Tuple =hidden_act
SCREAMING_SNAKE_CASE_: List[str] =tf_padding
SCREAMING_SNAKE_CASE_: int =classifier_dropout_prob
SCREAMING_SNAKE_CASE_: Any =initializer_range
SCREAMING_SNAKE_CASE_: Dict =layer_norm_eps
class a ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCamelCase : int = version.parse('1.11' )
@property
def lowerCamelCase__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def lowerCamelCase__ ( self : Tuple ) -> float:
'''simple docstring'''
return 1E-4
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str]=13 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=224 , lowerCAmelCase : List[str]=1000 , lowerCAmelCase : Optional[Any]=[3, 3, 6, 4] , lowerCAmelCase : int=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =parent
SCREAMING_SNAKE_CASE_: Any =batch_size
SCREAMING_SNAKE_CASE_: Tuple =num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] =is_training
SCREAMING_SNAKE_CASE_: Tuple =use_labels
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =num_labels
SCREAMING_SNAKE_CASE_: int =image_size
SCREAMING_SNAKE_CASE_: Optional[Any] =layer_depths
SCREAMING_SNAKE_CASE_: List[Any] =embed_dims
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_: Tuple =self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1E-5 , )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Any =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.num_labels
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE_: int =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): str =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_: Tuple ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[int] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCamelCase : Tuple = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Any = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Dict = False
UpperCamelCase : List[str] = False
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Any =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Tuple =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[Any] =SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_: Optional[Any] =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[str] =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Dict =outputs.hidden_states
SCREAMING_SNAKE_CASE_: List[Any] =8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Any =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
def _config_zero_init(lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_: Dict =copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1E-10 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =_config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: List[Any] =_config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: int =prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_UpperCAmelCase = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
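# The torch- and tf-specific entries below are appended to _import_structure
# only when the corresponding backend is importable; otherwise the
# OptionalDependencyNotAvailable branch silently skips them.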
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from math import pi
def arc_length(angle, radius):
    return 2 * pi * radius * (angle / 360)
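
# worked check (a minimal sketch): a 90 degree arc of a circle of radius 10 is a
# quarter of the full circumference, i.e. 2 * pi * 10 / 4 = 5 * pi
assert abs(arc_length(90, 10) - 5 * pi) < 1e-9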
if __name__ == "__main__":
    print(arc_length(90, 10))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""DPTFeatureExtractor"""]
_UpperCAmelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Any =jax.device_count()
SCREAMING_SNAKE_CASE_: Dict =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
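        # replicate() copies the pipeline parameters to every local device, and
        # shard() adds a leading device axis to the tokenized batch for pmap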
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Dict =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int ="""stabilityai/stable-diffusion-2"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Optional[int] =scheduler_params
SCREAMING_SNAKE_CASE_: Tuple ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.device_count()
SCREAMING_SNAKE_CASE_: Optional[Any] =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Any =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: str =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Any =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_UpperCAmelCase = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = """cpu"""
_UpperCAmelCase = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
_UpperCAmelCase = """path-to-your-trained-model"""
_UpperCAmelCase = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_UpperCAmelCase = pipe.to(device)
# to channels last
_UpperCAmelCase = pipe.unet.to(memory_format=torch.channels_last)
_UpperCAmelCase = pipe.vae.to(memory_format=torch.channels_last)
_UpperCAmelCase = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_UpperCAmelCase = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_UpperCAmelCase = torch.randn(2, 4, 6_4, 6_4)
_UpperCAmelCase = torch.rand(1) * 9_9_9
_UpperCAmelCase = torch.randn(2, 7_7, 7_6_8)
_UpperCAmelCase = (sample, timestep, encoder_hidden_status)
try:
_UpperCAmelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_UpperCAmelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_UpperCAmelCase = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_UpperCAmelCase = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_UpperCAmelCase = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_UpperCAmelCase = 6_6_6
_UpperCAmelCase = torch.Generator(device).manual_seed(seed)
_UpperCAmelCase = {"""generator""": generator}
if args.steps is not None:
_UpperCAmelCase = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_UpperCAmelCase = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
"""simple docstring"""
def solution(n=2000000):
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
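
# small-n sanity check (a minimal sketch): the primes below 10 are 2, 3, 5 and 7
assert solution(10) == 2 + 3 + 5 + 7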
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class a ( pl.LightningModule ):
def __init__( self : List[Any] , lowerCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: Any =model
SCREAMING_SNAKE_CASE_: Optional[Any] =2
SCREAMING_SNAKE_CASE_: Union[str, Any] =nn.Linear(self.model.config.hidden_size , self.num_labels )
def lowerCamelCase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
pass
def __magic_name__ ( lowercase , lowercase , lowercase ):
# load longformer model from model identifier
SCREAMING_SNAKE_CASE_: Union[str, Any] =LongformerModel.from_pretrained(lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =LightningModel(lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.load(lowercase , map_location=torch.device("""cpu""" ) )
lightning_model.load_state_dict(ckpt["""state_dict"""] )
# init longformer question answering model
SCREAMING_SNAKE_CASE_: int =LongformerForQuestionAnswering.from_pretrained(lowercase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(lowercase )
print(f'''Conversion successful. Model saved under {pytorch_dump_folder_path}''' )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_UpperCAmelCase = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
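# Hypothetical invocation (script name and paths are placeholders; the flags
# match the parser defined above):
#   python convert_longformer_qa.py \
#     --longformer_model longformer-base-4096 \
#     --longformer_question_answering_ckpt_path ./checkpoints/epoch=0.ckpt \
#     --pytorch_dump_folder_path ./longformer-base-4096-finetuned-qa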
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
_UpperCAmelCase = parser.parse_args()
if args.model_type == "bert":
_UpperCAmelCase = BertForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
_UpperCAmelCase = model.state_dict()
_UpperCAmelCase = {}
for w in ["word_embeddings", "position_embeddings"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
_UpperCAmelCase = 0
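# Copy six teacher layers, spread across the 12-layer teacher, onto the
# student's consecutive layers 0-5.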
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
_UpperCAmelCase = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
_UpperCAmelCase = state_dict["""cls.predictions.decoder.weight"""]
_UpperCAmelCase = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.dense.{w}"""]
_UpperCAmelCase = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
_UpperCAmelCase = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def __magic_name__ ( lowercase ):
for pegasus_name, hf_name in PATTERNS:
SCREAMING_SNAKE_CASE_: Optional[int] =k.replace(lowercase , lowercase )
return k
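# Illustrative trace of the replacement table above (the TF key is an assumed
# example of the checkpoint layout, not read from a real checkpoint):
#   "encoder/memory_attention/output_proj/kernel"
#   -> "encoder.encoder_attn.out_proj.weight"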
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Any =DEFAULTS.copy()
cfg_kwargs.update(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =PegasusConfig(**lowercase )
SCREAMING_SNAKE_CASE_: Any =PegasusForConditionalGeneration(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch_model.model.state_dict()
SCREAMING_SNAKE_CASE_: int ={}
for k, v in tf_weights.items():
SCREAMING_SNAKE_CASE_: Any =rename_state_dict_key(lowercase )
if new_k not in sd:
raise ValueError(f'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
SCREAMING_SNAKE_CASE_: Optional[int] =v.T
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor(lowercase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, f'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
SCREAMING_SNAKE_CASE_: str =torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] )
SCREAMING_SNAKE_CASE_: Optional[int] =mapping["""shared.weight"""]
SCREAMING_SNAKE_CASE_: Dict =mapping["""shared.weight"""]
SCREAMING_SNAKE_CASE_: Union[str, Any] ={k: torch.zeros_like(lowercase ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping}
mapping.update(**lowercase )
SCREAMING_SNAKE_CASE_: Tuple =torch_model.model.load_state_dict(lowercase , strict=lowercase )
SCREAMING_SNAKE_CASE_: List[str] =[
k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
]
assert unexpected_missing == [], f'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], f'''no matches found for the following tf keys {extra}'''
return torch_model
def __magic_name__ ( lowercase="./ckpt/aeslc/model.ckpt-32000" ):
SCREAMING_SNAKE_CASE_: Optional[int] =tf.train.list_variables(lowercase )
SCREAMING_SNAKE_CASE_: Any ={}
SCREAMING_SNAKE_CASE_: Optional[Any] =["""Adafactor""", """global_step"""]
for name, shape in tqdm(lowercase , desc="""converting tf checkpoint to dict""" ):
SCREAMING_SNAKE_CASE_: Union[str, Any] =any(pat in name for pat in ignore_name )
if skip_key:
continue
SCREAMING_SNAKE_CASE_: List[str] =tf.train.load_variable(lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =array
return tf_weights
def __magic_name__ ( lowercase , lowercase ):
# save tokenizer first
SCREAMING_SNAKE_CASE_: List[Any] =Path(lowercase ).parent.name
SCREAMING_SNAKE_CASE_: Optional[int] =task_specific_params[f'''summarization_{dataset}''']["""max_position_embeddings"""]
SCREAMING_SNAKE_CASE_: List[str] =PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=lowercase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(lowercase )
# convert model
SCREAMING_SNAKE_CASE_: Optional[Any] =get_tf_weights_as_numpy(lowercase )
SCREAMING_SNAKE_CASE_: List[str] =task_specific_params[f'''summarization_{dataset}''']
if dataset == "large":
SCREAMING_SNAKE_CASE_: Any =task_specific_params
SCREAMING_SNAKE_CASE_: List[str] =convert_pegasus(lowercase , lowercase )
torch_model.save_pretrained(lowercase )
SCREAMING_SNAKE_CASE_: int =torch_model.state_dict()
sd.pop("""model.decoder.embed_positions.weight""" )
sd.pop("""model.encoder.embed_positions.weight""" )
torch.save(lowercase , Path(lowercase ) / """pytorch_model.bin""" )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
_UpperCAmelCase = parser.parse_args()
if args.save_dir is None:
_UpperCAmelCase = Path(args.tf_ckpt_path).parent.name
_UpperCAmelCase = os.path.join("""pegasus""", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
"""simple docstring"""
def and_gate(input_a, input_b):
    return int((input_a, input_b).count(0) == 0)


def test_and_gate():
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
"""simple docstring"""
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def __magic_name__ ( lowercase , lowercase , lowercase ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""input_conv.weight_g"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[str] =checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: Union[str, Any] =checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[Any] =checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""output_conv.1.weight_g"""]
SCREAMING_SNAKE_CASE_: List[str] =checkpoint["""output_conv.1.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE_: List[Any] =SpeechTaHifiGanConfig.from_pretrained(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] =SpeechTaHifiGan(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch.load(lowercase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =np.load(lowercase )
SCREAMING_SNAKE_CASE_: Any =stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_: str =stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
model.save_pretrained(lowercase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
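# Hypothetical invocation (script name and paths are placeholders; the flags
# match the parser defined above):
#   python convert_hifigan.py \
#     --checkpoint_path ./generator.ckpt \
#     --stats_path ./stats.npy \
#     --pytorch_dump_folder_path ./speecht5-hifigan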
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
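# Example (illustrative): is_arithmetic_series([2, 4, 6]) returns True, while
# arithmetic_mean([2, 4, 6]) returns 4.0.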
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
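# e.g. "blocks.0.norm1.weight" -> "vit.encoder.layer.0.layernorm_before.weight"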
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # the fused (3 * dim, dim) qkv matrix is split back into thirds
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
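# Example invocation (script and folder names are illustrative; the checkpoint
# URL defaults to the base-sized MAE weights configured above):
#   python convert_vit_mae_checkpoint.py --pytorch_dump_folder_path ./vit-mae-base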
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return types
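# The mixin below is combined with unittest.TestCase by concrete tool test
# classes; each subclass is expected to set `self.tool` before these checks run.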
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
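# At import time only the lightweight configuration/tokenization symbols are
# resolved; `_LazyModule` defers the heavy torch/TF imports declared above until
# an attribute is actually accessed, while the TYPE_CHECKING branch gives static
# type checkers the real imports.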
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
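# Minimal usage sketch (variable names are illustrative, not from this file):
#   processor = AltCLIPProcessor(image_processor=my_image_processor, tokenizer=my_tokenizer)
#   batch = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
#   # `batch` now carries input_ids/attention_mask plus pixel_values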
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
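# Example (illustrative): odd_even_sort([5, 3, 1]) returns [1, 3, 5] after a few
# alternating passes over the even- and odd-indexed pairs.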
if __name__ == "__main__":
print("""Enter list to be sorted""")
_UpperCAmelCase = [int(x) for x in input().split()]
# inputing elements of the list in one line
_UpperCAmelCase = odd_even_sort(input_list)
print("""The sorted list is""")
print(sorted_list)
"""simple docstring"""
from PIL import Image
def mean_threshold(image: Image) -> Image:
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
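# The helper above binarizes a grayscale ("L" mode) image in place: every pixel
# brighter than the global mean becomes white (255), everything else black (0).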
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("""output_image_path""")
"""simple docstring"""
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return n + int(str(n)[::-1])


def solution(limit: int = 10000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
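# Project Euler 55: counts candidate Lychrel numbers below the limit, i.e.
# numbers that never reach a palindrome within 50 reverse-and-add steps
# (196 is the most famous candidate; the published answer for 10000 is 249).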
if __name__ == "__main__":
print(f"""{solution() = }""")
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
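    # the decoder variant above additionally fabricates encoder hidden states and
    # an encoder attention mask so cross-attention code paths can be exercised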
@require_flax
class a ( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp(self) -> None:
        '''simple docstring'''
        self.model_tester = FlaxBertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCAmelCase = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""DPTFeatureExtractor"""]
_UpperCAmelCase = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class a ( unittest.TestCase ):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =ObjectDetectionPipeline(model=lowerCAmelCase , image_processor=lowerCAmelCase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(lowerCAmelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
lowerCAmelCase , {
"""score""": ANY(lowerCAmelCase ),
"""label""": ANY(lowerCAmelCase ),
"""box""": {"""xmin""": ANY(lowerCAmelCase ), """ymin""": ANY(lowerCAmelCase ), """xmax""": ANY(lowerCAmelCase ), """ymax""": ANY(lowerCAmelCase )},
} , )
import datasets
SCREAMING_SNAKE_CASE_: Any =datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
SCREAMING_SNAKE_CASE_: str =[
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
SCREAMING_SNAKE_CASE_: Any =object_detector(lowerCAmelCase , threshold=0.0 )
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for outputs in batch_outputs:
self.assertGreater(len(lowerCAmelCase ) , 0 )
for detected_object in outputs:
self.assertEqual(
lowerCAmelCase , {
"""score""": ANY(lowerCAmelCase ),
"""label""": ANY(lowerCAmelCase ),
"""box""": {"""xmin""": ANY(lowerCAmelCase ), """ymin""": ANY(lowerCAmelCase ), """xmax""": ANY(lowerCAmelCase ), """ymax""": ANY(lowerCAmelCase )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple ="""hf-internal-testing/tiny-detr-mobilenetsv3"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =AutoModelForObjectDetection.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =ObjectDetectionPipeline(model=lowerCAmelCase , feature_extractor=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=4 ) , [
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
SCREAMING_SNAKE_CASE_: str =object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=4 ) , [
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_3_7_6, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""facebook/detr-resnet-50"""
SCREAMING_SNAKE_CASE_: Dict =AutoModelForObjectDetection.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =AutoFeatureExtractor.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =ObjectDetectionPipeline(model=lowerCAmelCase , feature_extractor=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
SCREAMING_SNAKE_CASE_: str =object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any ="""facebook/detr-resnet-50"""
SCREAMING_SNAKE_CASE_: int =pipeline("""object-detection""" , model=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
SCREAMING_SNAKE_CASE_: Any =object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=4 ) , [
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_9_8_2, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_9_6_0, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_9_5_5, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def lowerCamelCase__ ( self : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =0.9_9_8_5
SCREAMING_SNAKE_CASE_: Tuple ="""facebook/detr-resnet-50"""
SCREAMING_SNAKE_CASE_: str =pipeline("""object-detection""" , model=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=lowerCAmelCase )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=4 ) , [
{"""score""": 0.9_9_8_8, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_9_8_7, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def lowerCamelCase__ ( self : Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""Narsil/layoutlmv3-finetuned-funsd"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =0.9_9_9_3
SCREAMING_SNAKE_CASE_: Any =pipeline("""object-detection""" , model=lowerCAmelCase , threshold=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=4 ) , [
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9_9_9_3, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , )
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
def __init__( self : str ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: list[Any] =[]
SCREAMING_SNAKE_CASE_: int =0
SCREAMING_SNAKE_CASE_: int =0
def lowerCamelCase__ ( self : Optional[Any] ) -> bool:
'''simple docstring'''
return self.head == self.tail
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
self.data.append(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =self.tail + 1
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.data[self.head]
SCREAMING_SNAKE_CASE_: Optional[int] =self.head + 1
return ret
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.tail - self.head
def lowerCamelCase__ ( self : str ) -> None:
'''simple docstring'''
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class MyNode:
def __init__( self : Union[str, Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =data
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: MyNode | None =None
SCREAMING_SNAKE_CASE_: int =1
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
return self.data
def lowerCamelCase__ ( self : List[Any] ) -> MyNode | None:
'''simple docstring'''
return self.left
def lowerCamelCase__ ( self : Dict ) -> MyNode | None:
'''simple docstring'''
return self.right
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
return self.height
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =data
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =node
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : MyNode | None ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =node
def lowerCamelCase__ ( self : int , lowerCAmelCase : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =height
def __magic_name__ ( lowercase ):
if node is None:
return 0
return node.get_height()
def __magic_name__ ( lowercase , lowercase ):
if a > b:
return a
return b
def __magic_name__ ( lowercase ):
print("""left rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: int =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
print("""right rotation node:""" , node.get_data() )
SCREAMING_SNAKE_CASE_: List[Any] =node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowercase )
return ret
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Any =node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowercase ) )
return right_rotation(lowercase )
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Dict =node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowercase ) )
return left_rotation(lowercase )
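# Rebalancing cases used by insert_node/del_node below (naming follows the
# call sites in this file):
#   left-left heavy   -> right_rotation
#   left-right heavy  -> lr_rotation  (rotate the left child, then the node)
#   right-right heavy -> left_rotation
#   right-left heavy  -> rl_rotation  (rotate the right child, then the node)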
def __magic_name__ ( lowercase , lowercase ):
if node is None:
return MyNode(lowercase )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowercase ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
SCREAMING_SNAKE_CASE_: Union[str, Any] =node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
SCREAMING_SNAKE_CASE_: Any =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: List[Any] =lr_rotation(lowercase )
else:
node.set_right(insert_node(node.get_right() , lowercase ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
SCREAMING_SNAKE_CASE_: Tuple =node.get_right()
assert right_child is not None
if data < right_child.get_data():
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[int] =left_rotation(lowercase )
SCREAMING_SNAKE_CASE_: Tuple =my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowercase )
return node
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: Dict =root.get_right()
if right_child is None:
break
SCREAMING_SNAKE_CASE_: str =right_child
return root.get_data()
def __magic_name__ ( lowercase ):
while True:
SCREAMING_SNAKE_CASE_: str =root.get_left()
if left_child is None:
break
SCREAMING_SNAKE_CASE_: Dict =left_child
return root.get_data()
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: str =root.get_left()
SCREAMING_SNAKE_CASE_: List[Any] =root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_left_most(lowercase )
root.set_data(lowercase )
root.set_right(del_node(lowercase , lowercase ) )
elif left_child is not None:
SCREAMING_SNAKE_CASE_: Optional[int] =left_child
elif right_child is not None:
SCREAMING_SNAKE_CASE_: Any =right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(lowercase , lowercase ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowercase , lowercase ) )
if get_height(lowercase ) - get_height(lowercase ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
SCREAMING_SNAKE_CASE_: Tuple =left_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =rl_rotation(lowercase )
elif get_height(lowercase ) - get_height(lowercase ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
SCREAMING_SNAKE_CASE_: Optional[Any] =right_rotation(lowercase )
else:
SCREAMING_SNAKE_CASE_: str =lr_rotation(lowercase )
SCREAMING_SNAKE_CASE_: str =my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowercase )
return root
class AVLtree:
def __init__( self : int ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: MyNode | None =None
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
return get_height(self.root )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""insert:""" + str(lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Tuple =insert_node(self.root , lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : Any ) -> None:
'''simple docstring'''
print("""delete:""" + str(lowerCAmelCase ) )
if self.root is None:
print("""Tree is empty!""" )
return
SCREAMING_SNAKE_CASE_: Union[str, Any] =del_node(self.root , lowerCAmelCase )
def __str__( self : List[str] , ) -> str: # a level traversale, gives a more intuitive look on the tree
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =""""""
SCREAMING_SNAKE_CASE_: str =MyQueue()
q.push(self.root )
SCREAMING_SNAKE_CASE_: List[str] =self.get_height()
if layer == 0:
return output
SCREAMING_SNAKE_CASE_: int =0
while not q.is_empty():
SCREAMING_SNAKE_CASE_: int =q.pop()
SCREAMING_SNAKE_CASE_: List[Any] =""" """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(lowerCAmelCase )
q.push(lowerCAmelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
SCREAMING_SNAKE_CASE_: List[Any] =cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , lowerCAmelCase ) - 1:
SCREAMING_SNAKE_CASE_: int =layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def __magic_name__ ( ):
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
    t = AVLtree()
    lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class a ( DiffusionPipeline ):
def __init__( self : int , lowerCAmelCase : CLIPSegForImageSegmentation , lowerCAmelCase : CLIPSegProcessor , lowerCAmelCase : AutoencoderKL , lowerCAmelCase : CLIPTextModel , lowerCAmelCase : CLIPTokenizer , lowerCAmelCase : UNetaDConditionModel , lowerCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase : StableDiffusionSafetyChecker , lowerCAmelCase : CLIPImageProcessor , ) -> Any:
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
SCREAMING_SNAKE_CASE_: Optional[int] =(
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
"""to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate("""steps_offset!=1""" , """1.0.0""" , lowerCAmelCase , standard_warn=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =dict(scheduler.config )
SCREAMING_SNAKE_CASE_: int =1
SCREAMING_SNAKE_CASE_: Any =FrozenDict(lowerCAmelCase )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
SCREAMING_SNAKE_CASE_: int =(
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , lowerCAmelCase , standard_warn=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =dict(scheduler.config )
SCREAMING_SNAKE_CASE_: Any =True
SCREAMING_SNAKE_CASE_: List[Any] =FrozenDict(lowerCAmelCase )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
segmentation_model=lowerCAmelCase , segmentation_processor=lowerCAmelCase , vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , unet=lowerCAmelCase , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE_: Dict =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
self.enable_attention_slicing(lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
SCREAMING_SNAKE_CASE_: List[Any] =torch.device("""cuda""" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowerCAmelCase , lowerCAmelCase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCAmelCase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Tuple , lowerCAmelCase : Union[str, List[str]] , lowerCAmelCase : Union[torch.FloatTensor, PIL.Image.Image] , lowerCAmelCase : str , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 512 , lowerCAmelCase : int = 50 , lowerCAmelCase : float = 7.5 , lowerCAmelCase : Optional[Union[str, List[str]]] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase : int = 1 , **lowerCAmelCase : str , ) -> Optional[Any]:
'''simple docstring'''
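        # CLIPSeg produces a soft segmentation mask from the text prompt: the
        # sigmoid-activated logits are converted to a PIL image and reused as
        # the mask for the inpainting pipeline constructed below.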
SCREAMING_SNAKE_CASE_: Optional[int] =self.segmentation_processor(
text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
SCREAMING_SNAKE_CASE_: List[str] =self.segmentation_model(**lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
SCREAMING_SNAKE_CASE_: Any =self.numpy_to_pil(lowerCAmelCase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
SCREAMING_SNAKE_CASE_: List[Any] =StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowerCAmelCase , image=lowerCAmelCase , mask_image=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , )
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
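# Example: atbash("ABC") == "ZYX" -- the cipher maps the i-th letter of the
# alphabet to the (27 - i)-th, independently for upper- and lower-case letters.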
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class a ( unittest.TestCase ):
    def tearDown(self) -> None:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
'''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
@property
    def dummy_cond_unet(self):
'''simple docstring'''
torch.manual_seed(0 )
        model = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
    def dummy_vae(self):
'''simple docstring'''
torch.manual_seed(0 )
        model = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self):
'''simple docstring'''
torch.manual_seed(0 )
        config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config)
@property
    def dummy_extractor(self):
'''simple docstring'''
def extract(*lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Any ):
            class Out:
def __init__( self : List[str] ) -> str:
'''simple docstring'''
                    self.pixel_values = torch.ones([0])
                def to(self, device):
                    '''simple docstring'''
                    self.pixel_values.to(device)
                    return self
return Out()
return extract
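    # NOTE: `dummy_extractor` above returns a stand-in whose `pixel_values` is an
    # empty tensor, so the pipeline can be constructed in these unit tests
    # without a real feature extractor.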
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] ="""cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_: List[Any] =self.dummy_cond_unet
SCREAMING_SNAKE_CASE_: Tuple =DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_: int =self.dummy_vae
SCREAMING_SNAKE_CASE_: List[Any] =self.dummy_text_encoder
SCREAMING_SNAKE_CASE_: int =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
    # assemble the pipeline with the DDIM scheduler configured above
SCREAMING_SNAKE_CASE_: Any =StableDiffusionPipeline(
unet=lowerCAmelCase , scheduler=lowerCAmelCase , vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_: Optional[int] =sd_pipe([prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
SCREAMING_SNAKE_CASE_: Optional[int] =output.images
SCREAMING_SNAKE_CASE_: Tuple =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_: Dict =sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowerCAmelCase , )[0]
SCREAMING_SNAKE_CASE_: Union[str, Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_: Tuple =np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] ="""cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_: int =self.dummy_cond_unet
SCREAMING_SNAKE_CASE_: str =PNDMScheduler(skip_prk_steps=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =self.dummy_vae
SCREAMING_SNAKE_CASE_: int =self.dummy_text_encoder
SCREAMING_SNAKE_CASE_: Optional[Any] =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_: int =StableDiffusionPipeline(
unet=lowerCAmelCase , scheduler=lowerCAmelCase , vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_: Optional[Any] =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Any =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe([prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
SCREAMING_SNAKE_CASE_: Dict =output.images
SCREAMING_SNAKE_CASE_: Optional[int] =torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowerCAmelCase , )[0]
SCREAMING_SNAKE_CASE_: Tuple =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: str =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_: Dict =np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=lowerCAmelCase )
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert isinstance(pipe.scheduler , lowerCAmelCase )
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE_: Union[str, Any] =pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =StableDiffusionPipeline.from_pretrained(lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE_: Dict =pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =self.dummy_cond_unet
SCREAMING_SNAKE_CASE_: List[str] =PNDMScheduler(skip_prk_steps=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =self.dummy_vae
SCREAMING_SNAKE_CASE_: List[Any] =self.dummy_text_encoder
SCREAMING_SNAKE_CASE_: Tuple =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
SCREAMING_SNAKE_CASE_: str =unet.half()
SCREAMING_SNAKE_CASE_: Dict =vae.half()
SCREAMING_SNAKE_CASE_: int =bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_: List[Any] =StableDiffusionPipeline(
unet=lowerCAmelCase , scheduler=lowerCAmelCase , vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_: Optional[int] =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Optional[Any] =sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Any ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE_: Union[str, Any] =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =(
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
SCREAMING_SNAKE_CASE_: str =40_0366_0346
SCREAMING_SNAKE_CASE_: Dict =7
# without safety guidance (sld_guidance_scale = 0)
SCREAMING_SNAKE_CASE_: Tuple =torch.manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE_: Tuple =output.images
SCREAMING_SNAKE_CASE_: int =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: Tuple =[0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
SCREAMING_SNAKE_CASE_: Dict =torch.manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE_: Optional[Any] =output.images
SCREAMING_SNAKE_CASE_: Optional[Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =[0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE_: Any =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str ="""padme amidala taking a bath artwork, safe for work, no nudity"""
SCREAMING_SNAKE_CASE_: int =27_3497_1755
SCREAMING_SNAKE_CASE_: Union[str, Any] =7
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE_: List[str] =output.images
SCREAMING_SNAKE_CASE_: Optional[Any] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: List[str] =[0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
SCREAMING_SNAKE_CASE_: Dict =torch.manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE_: Optional[int] =output.images
SCREAMING_SNAKE_CASE_: List[str] =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: Dict =[0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
SCREAMING_SNAKE_CASE_: Any =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =(
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
SCREAMING_SNAKE_CASE_: Optional[Any] =10_4435_5234
SCREAMING_SNAKE_CASE_: Tuple =12
SCREAMING_SNAKE_CASE_: Any =torch.manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE_: List[Any] =output.images
SCREAMING_SNAKE_CASE_: str =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: Dict =np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
SCREAMING_SNAKE_CASE_: Any =torch.manual_seed(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =sd_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE_: Union[str, Any] =output.images
SCREAMING_SNAKE_CASE_: int =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_: List[str] =np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 719
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=3 , lowerCAmelCase : Optional[Any]=64 , lowerCAmelCase : Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =np.random.default_rng(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =length
SCREAMING_SNAKE_CASE_: Union[str, Any] =rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_: Tuple =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ) -> str:
'''simple docstring'''
return self.length
def __getitem__( self : Union[str, Any] , lowerCAmelCase : Any ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : str=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Optional[int]=False ) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: Dict =True
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tuple=None ) -> int:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
def __init__( self : Union[str, Any] , lowerCAmelCase : Any=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : List[Any]=False ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[str] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: List[Any] =True
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : int=None ) -> Any:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Optional[int] =False
return x * self.a + self.b
def get_dataloaders(accelerator, batch_size: int = 16):
    # NOTE: the function name `get_dataloaders` is a best guess reconstructed from the
    # call sites; the first argument is definitely an `Accelerator` (see collate_fn).
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    data_files = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
    datasets = load_dataset("""csv""" , data_files=data_files )
    label_list = datasets["""train"""].unique("""label""" )
    label_to_id = {v: i for i, v in enumerate(label_list )}

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None , padding="""max_length""" )
        if "label" in examples:
            outputs["""labels"""] = [label_to_id[l] for l in examples["""label"""]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["""sentence1""", """sentence2""", """label"""] , )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader
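# Illustration of the two padding strategies used in collate_fn above (the `features`
# list is hypothetical; tokenizer.pad is the standard 🤗 tokenizer API). XLA on TPU
# recompiles the graph for every new tensor shape, so fixed "max_length" batches
# compile once, while "longest" padding can trigger a compilation per distinct shape:
#   fixed = tokenizer.pad(features, padding="max_length", max_length=128, return_tensors="pt")
#   dynamic = tokenizer.pad(features, padding="longest", return_tensors="pt")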
| 36
| 0
|
"""simple docstring"""
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("""math domain error""" )
    if num > 171.5:
        raise OverflowError("""math range error""" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("""num must be an integer or a half-integer""" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma() -> None:
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
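# Worked example (illustrative, not part of the original file): unrolling the
# recurrence gamma(n) = (n - 1) * gamma(n - 1) with gamma(0.5) = sqrt(pi) gives
# gamma(2.5) = 1.5 * 0.5 * sqrt(pi) = 3 * sqrt(pi) / 4 ≈ 1.3293.
assert abs(gamma(2.5 ) - 0.75 * sqrt(pi )) < 1e-12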
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("""Gamma of: """))
        print(f"""gamma({num}) = {gamma(num)}""")
        print("""\nEnter 0 to exit...""")
| 720
|
"""simple docstring"""
def catalan_numbers(upper_limit: int) -> list:
    if upper_limit < 0:
        raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j) * C(i-j-1)) for j = 0 .. i-1
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
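# Worked example (illustrative): C(4) = C0*C3 + C1*C2 + C2*C1 + C3*C0
# = 5 + 2 + 2 + 5 = 14, so the first six values are 1, 1, 2, 5, 14, 42.
assert catalan_numbers(5 ) == [1, 1, 2, 5, 14, 42]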
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
            N = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(f"""The Catalan numbers from 0 through {N} are:""")
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
| 36
| 0
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def __magic_name__ ( lowercase , lowercase , lowercase ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""input_conv.weight_g"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[str] =checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: Union[str, Any] =checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[Any] =checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""output_conv.1.weight_g"""]
SCREAMING_SNAKE_CASE_: List[str] =checkpoint["""output_conv.1.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE_: List[Any] =SpeechTaHifiGanConfig.from_pretrained(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] =SpeechTaHifiGan(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch.load(lowercase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =np.load(lowercase )
SCREAMING_SNAKE_CASE_: Any =stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_: str =stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
model.save_pretrained(lowercase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
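# Illustrative invocation (script and file names hypothetical):
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan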
| 721
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_UpperCAmelCase = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = 'albert'
def __init__( self : Dict , lowerCAmelCase : List[str]=3_0000 , lowerCAmelCase : List[Any]=128 , lowerCAmelCase : List[str]=4096 , lowerCAmelCase : str=12 , lowerCAmelCase : str=1 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Dict=1_6384 , lowerCAmelCase : int=1 , lowerCAmelCase : str="gelu_new" , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : List[Any]="absolute" , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[int]=3 , **lowerCAmelCase : int , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =vocab_size
SCREAMING_SNAKE_CASE_: Optional[int] =embedding_size
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_size
SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers
SCREAMING_SNAKE_CASE_: Any =num_hidden_groups
SCREAMING_SNAKE_CASE_: List[Any] =num_attention_heads
SCREAMING_SNAKE_CASE_: List[Any] =inner_group_num
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_act
SCREAMING_SNAKE_CASE_: int =intermediate_size
SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: int =max_position_embeddings
SCREAMING_SNAKE_CASE_: Any =type_vocab_size
SCREAMING_SNAKE_CASE_: int =initializer_range
SCREAMING_SNAKE_CASE_: List[Any] =layer_norm_eps
SCREAMING_SNAKE_CASE_: Dict =classifier_dropout_prob
SCREAMING_SNAKE_CASE_: int =position_embedding_type
class a ( UpperCAmelCase__ ):
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
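# What the mapping above encodes (illustrative): each entry names the dynamic axes of
# one ONNX input, e.g. OrderedDict([("input_ids", {0: "batch", 1: "sequence"}), ...]),
# so the exported graph accepts any batch size and sequence length at inference time.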
| 36
| 0
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =SwinConfig(image_size=192 )
if "base" in model_name:
SCREAMING_SNAKE_CASE_: str =6
SCREAMING_SNAKE_CASE_: Any =128
SCREAMING_SNAKE_CASE_: Optional[Any] =(2, 2, 18, 2)
SCREAMING_SNAKE_CASE_: Any =(4, 8, 16, 32)
elif "large" in model_name:
SCREAMING_SNAKE_CASE_: Optional[int] =12
SCREAMING_SNAKE_CASE_: Optional[int] =192
SCREAMING_SNAKE_CASE_: int =(2, 2, 18, 2)
SCREAMING_SNAKE_CASE_: Any =(6, 12, 24, 48)
else:
raise ValueError("""Model not supported, only supports base and large variants""" )
SCREAMING_SNAKE_CASE_: Optional[Any] =window_size
SCREAMING_SNAKE_CASE_: Union[str, Any] =embed_dim
SCREAMING_SNAKE_CASE_: Tuple =depths
SCREAMING_SNAKE_CASE_: str =num_heads
return config
def __magic_name__ ( lowercase ):
if "encoder.mask_token" in name:
SCREAMING_SNAKE_CASE_: int =name.replace("""encoder.mask_token""" , """embeddings.mask_token""" )
if "encoder.patch_embed.proj" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""encoder.patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "encoder.patch_embed.norm" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""encoder.patch_embed.norm""" , """embeddings.norm""" )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
SCREAMING_SNAKE_CASE_: Optional[int] =name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE_: Dict =name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE_: Optional[Any] =name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE_: List[Any] =name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE_: Tuple =name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
SCREAMING_SNAKE_CASE_: List[str] ="""layernorm.weight"""
if name == "encoder.norm.bias":
SCREAMING_SNAKE_CASE_: int ="""layernorm.bias"""
if "decoder" in name:
pass
else:
SCREAMING_SNAKE_CASE_: str ="""swin.""" + name
return name
def __magic_name__ ( lowercase , lowercase ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE_: int =orig_state_dict.pop(lowercase )
if "attn_mask" in key:
pass
elif "qkv" in key:
SCREAMING_SNAKE_CASE_: Dict =key.split(""".""" )
SCREAMING_SNAKE_CASE_: List[Any] =int(key_split[2] )
SCREAMING_SNAKE_CASE_: Tuple =int(key_split[4] )
SCREAMING_SNAKE_CASE_: Optional[Any] =model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
SCREAMING_SNAKE_CASE_: Any =val[:dim, :]
SCREAMING_SNAKE_CASE_: Any =val[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE_: Dict =val[-dim:, :]
else:
SCREAMING_SNAKE_CASE_: Tuple =val[
:dim
]
SCREAMING_SNAKE_CASE_: Optional[Any] =val[
dim : dim * 2
]
SCREAMING_SNAKE_CASE_: Union[str, Any] =val[
-dim:
]
else:
SCREAMING_SNAKE_CASE_: str =val
return orig_state_dict
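# The qkv split above in standalone form (illustrative shapes, not part of the
# original script): a fused attention projection of shape (3 * dim, dim) is cut
# into equal query/key/value thirds along the first axis.
_dim = 4
_qkv = np.arange(3 * _dim * _dim).reshape(3 * _dim, _dim)
_q, _k, _v = _qkv[:_dim, :], _qkv[_dim : _dim * 2, :], _qkv[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (_dim, _dim)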
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =torch.load(lowercase , map_location="""cpu""" )["""model"""]
SCREAMING_SNAKE_CASE_: Tuple =get_swin_config(lowercase )
SCREAMING_SNAKE_CASE_: str =SwinForMaskedImageModeling(lowercase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =convert_state_dict(lowercase , lowercase )
model.load_state_dict(lowercase )
SCREAMING_SNAKE_CASE_: Any ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
SCREAMING_SNAKE_CASE_: Dict =ViTImageProcessor(size={"""height""": 192, """width""": 192} )
SCREAMING_SNAKE_CASE_: int =Image.open(requests.get(lowercase , stream=lowercase ).raw )
SCREAMING_SNAKE_CASE_: Tuple =image_processor(images=lowercase , return_tensors="""pt""" )
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Any =model(**lowercase ).logits
print(outputs.keys() )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if push_to_hub:
print(f'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(f'''microsoft/{model_name}''' )
image_processor.push_to_hub(f'''microsoft/{model_name}''' )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""swin-base-simmim-window6-192""",
type=str,
choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""],
help="""Name of the Swin SimMIM model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""",
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 700
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class a ( yaml.SafeLoader ):
def lowerCamelCase__ ( self : int , lowerCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =[self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE_: Any =[tuple(lowerCAmelCase ) if isinstance(lowerCAmelCase , lowerCAmelCase ) else key for key in keys]
SCREAMING_SNAKE_CASE_: Dict =Counter(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =[key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'''Got duplicate yaml keys: {duplicate_keys}''' )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=False ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =super().construct_mapping(lowerCAmelCase , deep=lowerCAmelCase )
self._check_no_duplicates_on_constructed_node(lowerCAmelCase )
return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("""---""" ) + 1
        yamlblock = """\n""".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class a ( UpperCAmelCase__ ):
# class attributes
UpperCamelCase : Tuple = {'train_eval_index'} # train-eval-index in the YAML metadata
@classmethod
def lowerCamelCase__ ( cls : List[Any] , lowerCAmelCase : Path ) -> "DatasetMetadata":
'''simple docstring'''
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] =_split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(lowerCAmelCase )
else:
return cls()
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Path ) -> List[str]:
'''simple docstring'''
if path.exists():
with open(lowerCAmelCase , encoding="""utf-8""" ) as readme_file:
SCREAMING_SNAKE_CASE_: str =readme_file.read()
else:
SCREAMING_SNAKE_CASE_: str =None
SCREAMING_SNAKE_CASE_: Tuple =self._to_readme(lowerCAmelCase )
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as readme_file:
readme_file.write(lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : Optional[str] = None ) -> str:
'''simple docstring'''
if readme_content is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =_split_yaml_from_readme(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] ="""---\n""" + self.to_yaml_string() + """---\n""" + content
else:
SCREAMING_SNAKE_CASE_: List[Any] ="""---\n""" + self.to_yaml_string() + """---\n"""
return full_content
@classmethod
def lowerCamelCase__ ( cls : Optional[int] , lowerCAmelCase : str ) -> "DatasetMetadata":
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =yaml.load(lowerCAmelCase , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
SCREAMING_SNAKE_CASE_: List[Any] ={
(key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**lowerCAmelCase )
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=lowerCAmelCase , allow_unicode=lowerCAmelCase , encoding="""utf-8""" , ).decode("""utf-8""" )
_UpperCAmelCase = {
"""image-classification""": [],
"""translation""": [],
"""image-segmentation""": [],
"""fill-mask""": [],
"""automatic-speech-recognition""": [],
"""token-classification""": [],
"""sentence-similarity""": [],
"""audio-classification""": [],
"""question-answering""": [],
"""summarization""": [],
"""zero-shot-classification""": [],
"""table-to-text""": [],
"""feature-extraction""": [],
"""other""": [],
"""multiple-choice""": [],
"""text-classification""": [],
"""text-to-image""": [],
"""text2text-generation""": [],
"""zero-shot-image-classification""": [],
"""tabular-classification""": [],
"""tabular-regression""": [],
"""image-to-image""": [],
"""tabular-to-text""": [],
"""unconditional-image-generation""": [],
"""text-retrieval""": [],
"""text-to-speech""": [],
"""object-detection""": [],
"""audio-to-audio""": [],
"""text-generation""": [],
"""conversational""": [],
"""table-question-answering""": [],
"""visual-question-answering""": [],
"""image-to-text""": [],
"""reinforcement-learning""": [],
"""voice-activity-detection""": [],
"""time-series-forecasting""": [],
"""document-question-answering""": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_UpperCAmelCase = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""")
ap.add_argument("""readme_filepath""")
_UpperCAmelCase = ap.parse_args()
_UpperCAmelCase = Path(args.readme_filepath)
_UpperCAmelCase = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 36
| 0
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class a ( UpperCAmelCase__ ):
UpperCamelCase : Any = ''
UpperCamelCase : Dict = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self : Any , lowerCAmelCase : Optional[DatasetInfo] = None , lowerCAmelCase : Optional[str] = None , **lowerCAmelCase : Union[str, Any] , ) -> Dict:
'''simple docstring'''
super().__init__(self , **lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =repo_info
SCREAMING_SNAKE_CASE_: Union[str, Any] =token
SCREAMING_SNAKE_CASE_: int =None
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
if self.dir_cache is None:
SCREAMING_SNAKE_CASE_: List[str] ={}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
SCREAMING_SNAKE_CASE_: Tuple ={
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(lowerCAmelCase ): {"""name""": str(lowerCAmelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase__ ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : str = "rb" , **lowerCAmelCase : str , ) -> List[str]:
'''simple docstring'''
if not isinstance(self.repo_info , lowerCAmelCase ):
raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
SCREAMING_SNAKE_CASE_: Optional[Any] =hf_hub_url(self.repo_info.id , lowerCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
lowerCAmelCase , mode=lowerCAmelCase , headers=get_authentication_headers_for_url(lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Tuple , **lowerCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
self._get_dirs()
SCREAMING_SNAKE_CASE_: str =self._strip_protocol(lowerCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowerCAmelCase )
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple=False , **lowerCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
self._get_dirs()
SCREAMING_SNAKE_CASE_: int =PurePosixPath(path.strip("""/""" ) )
SCREAMING_SNAKE_CASE_: Optional[Any] ={}
for p, f in self.dir_cache.items():
SCREAMING_SNAKE_CASE_: int =PurePosixPath(p.strip("""/""" ) )
SCREAMING_SNAKE_CASE_: Any =p.parent
if root == path:
SCREAMING_SNAKE_CASE_: Optional[Any] =f
SCREAMING_SNAKE_CASE_: str =list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 701
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    return (data["""data"""], data["""target"""])


def xgboost(features, target) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris )
    x_train, x_test, y_train, y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris["""target_names"""]
    # Create an XGBoost Classifier from the training data
    classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        classifier , x_test , y_test , display_labels=names , cmap="""Blues""" , normalize="""true""" , )
    plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 36
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_UpperCAmelCase = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 702
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atomaa_masks(protein: Dict) -> Dict:
SCREAMING_SNAKE_CASE_: Optional[Any] =[]
SCREAMING_SNAKE_CASE_: List[str] =[]
SCREAMING_SNAKE_CASE_: Any =[]
for rt in rc.restypes:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
SCREAMING_SNAKE_CASE_: Any ={name: i for i, name in enumerate(lowercase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.tensor(
lowercase , dtype=torch.intaa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor(
lowercase , dtype=torch.floataa , device=protein["""aatype"""].device , )
SCREAMING_SNAKE_CASE_: Optional[Any] =protein["""aatype"""].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Any =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: Tuple =residx_atomaa_mask
SCREAMING_SNAKE_CASE_: Dict =residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
SCREAMING_SNAKE_CASE_: Dict =restype_atomaa_to_atomaa[protein_aatype]
SCREAMING_SNAKE_CASE_: Optional[int] =residx_atomaa_to_atomaa.long()
# create the corresponding mask
SCREAMING_SNAKE_CASE_: Optional[int] =torch.zeros([21, 37] , dtype=torch.floataa , device=protein["""aatype"""].device )
for restype, restype_letter in enumerate(rc.restypes ):
SCREAMING_SNAKE_CASE_: int =rc.restype_atoa[restype_letter]
SCREAMING_SNAKE_CASE_: Any =rc.residue_atoms[restype_name]
for atom_name in atom_names:
SCREAMING_SNAKE_CASE_: Optional[int] =rc.atom_order[atom_name]
SCREAMING_SNAKE_CASE_: Dict =1
SCREAMING_SNAKE_CASE_: List[str] =restype_atomaa_mask[protein_aatype]
SCREAMING_SNAKE_CASE_: List[Any] =residx_atomaa_mask
return protein
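# Illustrative downstream use of the (num_res, 14) index map built above (values
# hypothetical): atom37-ordered features can be gathered into atom14 order along
# the atom axis, e.g.
#   atom37_feats = torch.randn(num_res, 37)
#   atom14_feats = torch.gather(atom37_feats, 1, residx_atomaa_to_atomaa)  # -> (num_res, 14)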
def make_atomaa_masks_np(batch: Dict) -> Dict:
    protein = tree_map(lambda n: torch.tensor(n , device=batch["""aatype"""].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t: np.array(t ) , make_atomaa_masks(protein ) )
    return out
| 36
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class a ( UpperCAmelCase__ ):
UpperCamelCase : int = ['pixel_values']
def __init__( self : Optional[Any] , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = True , lowerCAmelCase : Union[int, float] = 1 / 255 , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : bool = True , **lowerCAmelCase : Dict , ) -> None:
'''simple docstring'''
super().__init__(**lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =size if size is not None else {"""shortest_edge""": 224}
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
SCREAMING_SNAKE_CASE_: str =get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase , param_name="""crop_size""" )
SCREAMING_SNAKE_CASE_: Dict =do_resize
SCREAMING_SNAKE_CASE_: List[Any] =size
SCREAMING_SNAKE_CASE_: Dict =resample
SCREAMING_SNAKE_CASE_: str =do_center_crop
SCREAMING_SNAKE_CASE_: List[str] =crop_size
SCREAMING_SNAKE_CASE_: Any =do_rescale
SCREAMING_SNAKE_CASE_: Union[str, Any] =rescale_factor
SCREAMING_SNAKE_CASE_: Union[str, Any] =do_normalize
SCREAMING_SNAKE_CASE_: Tuple =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE_: Optional[int] =image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE_: List[str] =do_convert_rgb
def lowerCamelCase__ ( self : Optional[Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : str , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE_: Any =get_resize_output_image_size(lowerCAmelCase , size=size["""shortest_edge"""] , default_to_square=lowerCAmelCase )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : int , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Tuple , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[int] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[int, float] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Optional[Any] , ) -> Any:
'''simple docstring'''
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : int , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : str , ) -> np.ndarray:
'''simple docstring'''
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase__ ( self : Dict , lowerCAmelCase : ImageInput , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = None , lowerCAmelCase : bool = None , lowerCAmelCase : int = None , lowerCAmelCase : bool = None , lowerCAmelCase : float = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase : List[str] , ) -> PIL.Image.Image:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_: Tuple =size if size is not None else self.size
SCREAMING_SNAKE_CASE_: List[str] =get_size_dict(lowerCAmelCase , param_name="""size""" , default_to_square=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int =resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_: str =do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_: Union[str, Any] =crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_size_dict(lowerCAmelCase , param_name="""crop_size""" , default_to_square=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_: Dict =rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_: int =do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_: Optional[Any] =image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_: str =image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_: Union[str, Any] =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE_: Optional[Any] =make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE_: int =[convert_to_rgb(lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_: List[Any] =[to_numpy_array(lowerCAmelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_: Any =[self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_: List[str] =[self.center_crop(image=lowerCAmelCase , size=lowerCAmelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_: Optional[int] =[self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_: str =[self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_: Optional[int] =[to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_: str ={"""pixel_values""": images}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
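# Illustrative end-to-end usage of the processor above (the class is named `a` in
# this file; shapes assume the 224 defaults):
#   processor = a()
#   batch = processor(images=pil_image, return_tensors="pt")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224) after resize, center crop,
#   # rescale to [0, 1] and normalization with the OpenAI CLIP mean/std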
| 703
|
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_UpperCAmelCase = ["""text""", """image""", """audio"""]
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: str =[]
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(lowercase , lowercase ):
inputs.append(create_inputs(lowercase ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: int =[]
for output in outputs:
if isinstance(lowercase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(lowercase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(lowercase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
@is_tool_test
class a :
def lowerCamelCase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
SCREAMING_SNAKE_CASE_: Optional[int] =self.tool.inputs
for _input in inputs:
if isinstance(_input , lowerCAmelCase ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
SCREAMING_SNAKE_CASE_: Any =self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowerCamelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: List[Any] =self.tool(*lowerCAmelCase )
# There is a single output
if len(self.tool.outputs ) == 1:
SCREAMING_SNAKE_CASE_: str =[outputs]
self.assertListEqual(output_types(lowerCAmelCase ) , self.tool.outputs )
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def lowerCamelCase__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Tuple =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: int =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase , self.tool.outputs ):
SCREAMING_SNAKE_CASE_: int =AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase , lowerCAmelCase ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =create_inputs(self.tool.inputs )
SCREAMING_SNAKE_CASE_: Union[str, Any] =[]
for _input, input_type in zip(lowerCAmelCase , self.tool.inputs ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
SCREAMING_SNAKE_CASE_: Dict =self.tool(*lowerCAmelCase )
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] =[outputs]
self.assertEqual(len(lowerCAmelCase ) , len(self.tool.outputs ) )
| 36
| 0
|
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self ) -> None:
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self ) -> None:
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak )
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self ) -> None:
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor )
        self.thread.daemon = True
        self.thread.start()

    def stop(self ) -> int:
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()
def __magic_name__ ( ):
# Time
SCREAMING_SNAKE_CASE_: Optional[int] ={"""time""": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
SCREAMING_SNAKE_CASE_: List[Any] =psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
SCREAMING_SNAKE_CASE_: List[Any] =torch.cuda.memory_allocated(lowercase )
torch.cuda.reset_peak_memory_stats()
return measures
def __magic_name__ ( start_measures ):
# Time
SCREAMING_SNAKE_CASE_: Union[str, Any] ={"""time""": time.time() - start_measures["""time"""]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
SCREAMING_SNAKE_CASE_: Any =(psutil.Process().memory_info().rss - start_measures["""cpu"""]) / 2**20
SCREAMING_SNAKE_CASE_: Dict =(cpu_peak_tracker.stop() - start_measures["""cpu"""]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
        SCREAMING_SNAKE_CASE_: Dict =(torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
        SCREAMING_SNAKE_CASE_: Optional[int] =(torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20
return measures
def __magic_name__ ( measures , description ):
print(f'''{description}:''' )
print(f'''- Time: {measures["time"]:.2f}s''' )
for i in range(torch.cuda.device_count() ):
        print(f'''- GPU {i} allocated: {measures[str(i )]:.2f}MiB''' )
SCREAMING_SNAKE_CASE_: int =measures[f'''{i}-peak''']
print(f'''- GPU {i} peak: {peak:.2f}MiB''' )
print(f'''- CPU RAM allocated: {measures["cpu"]:.2f}MiB''' )
print(f'''- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB''' )
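# Hypothetical end-to-end usage of the three helpers above, assuming they are
# named start_measure / end_measure / log_measures (the dump renames every
# function to __magic_name__, so these names are assumptions):
#   start = start_measure()
#   run_workload()                    # any training or inference loop
#   measures = end_measure(start)     # deltas reported in MiB (rss / 2**20)
#   log_measures(measures, "workload")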
| 704
|
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( numsa , numsb ):
    SCREAMING_SNAKE_CASE_: List[Any] =sorted(numsa + numsb )
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple =divmod(len(all_numbers ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
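# Worked example: median_of_two_arrays([1, 3], [2]) sorts to [1, 2, 3]
# (divmod(3, 2) == (1, 1), odd length) and returns all_numbers[1] == 2, while
# median_of_two_arrays([1, 2], [3, 4]) returns (3 + 2) / 2 == 2.5.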
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of first array: """).split()]
_UpperCAmelCase = [float(x) for x in input("""Enter the elements of second array: """).split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 36
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_UpperCAmelCase = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["""OwlViTFeatureExtractor"""]
_UpperCAmelCase = ["""OwlViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
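# A simplified sketch of the lazy-import pattern driving the module above (the
# real transformers._LazyModule also handles submodule registration and module
# specs; this version only defers attribute imports):
import importlib
import types

class SimpleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Import the defining submodule only on first access to the attribute.
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)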
| 705
|
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str]=13 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=224 , lowerCAmelCase : List[str]=1000 , lowerCAmelCase : Optional[Any]=[3, 3, 6, 4] , lowerCAmelCase : int=[48, 56, 112, 220] , ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =parent
SCREAMING_SNAKE_CASE_: Any =batch_size
SCREAMING_SNAKE_CASE_: Tuple =num_channels
SCREAMING_SNAKE_CASE_: Union[str, Any] =is_training
SCREAMING_SNAKE_CASE_: Tuple =use_labels
SCREAMING_SNAKE_CASE_: Optional[int] =hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Tuple =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: List[Any] =num_labels
SCREAMING_SNAKE_CASE_: int =image_size
SCREAMING_SNAKE_CASE_: Optional[Any] =layer_depths
SCREAMING_SNAKE_CASE_: List[Any] =embed_dims
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: List[Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE_: List[str] =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_: Tuple =self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1E-5 , )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Any =model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def lowerCamelCase__ ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.num_labels
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE_: int =SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE_: Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_: Dict =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)): str =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_: Tuple ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[int] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCamelCase : Tuple = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase : Any = False
UpperCamelCase : Optional[int] = False
UpperCamelCase : Optional[Any] = False
UpperCamelCase : Dict = False
UpperCamelCase : List[str] = False
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE_: Union[str, Any] =ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCamelCase__ ( self : Tuple ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def lowerCamelCase__ ( self : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int =model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Any =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: Tuple =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Any ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Optional[Any] =SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
SCREAMING_SNAKE_CASE_: Optional[Any] =model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: List[str] =model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_: Dict =outputs.hidden_states
SCREAMING_SNAKE_CASE_: List[Any] =8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Dict =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Any =True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
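    # With the tester defaults (image_size=224, embed_dims=[48, 56, 112, 220]),
    # the 8 hidden states checked above have spatial sizes 56, 56, 28, 28, 14,
    # 14, 7, 7 and channel counts 48, 48, 56, 56, 112, 112, 220, 220.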
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
def _config_zero_init(lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_: Dict =copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1E-10 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple =_config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: List[Any] =_config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Any =model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: List[Any] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =self.default_image_processor
SCREAMING_SNAKE_CASE_: int =prepare_img()
SCREAMING_SNAKE_CASE_: Union[str, Any] =image_processor(images=lowerCAmelCase , return_tensors="""pt""" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**lowerCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] =torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1E-4 ) )
| 36
| 0
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class a :
def __init__( self : Union[str, Any] , lowerCAmelCase : List[str]=2 , lowerCAmelCase : int=3 , lowerCAmelCase : Optional[Any]=64 , lowerCAmelCase : Union[str, Any]=None ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =np.random.default_rng(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =length
SCREAMING_SNAKE_CASE_: Union[str, Any] =rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_: Tuple =a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ) -> str:
'''simple docstring'''
return self.length
def __getitem__( self : Union[str, Any] , lowerCAmelCase : Any ) -> List[str]:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class a ( torch.nn.Module ):
def __init__( self : Optional[int] , lowerCAmelCase : str=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Optional[int]=False ) -> Tuple:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: int =torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_: Dict =True
def lowerCamelCase__ ( self : str , lowerCAmelCase : Tuple=None ) -> int:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Union[str, Any] =False
return x * self.a[0] + self.b[0]
class a ( torch.nn.Module ):
def __init__( self : Union[str, Any] , lowerCAmelCase : Any=0 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : List[Any]=False ) -> str:
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE_: List[str] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.nn.Parameter(torch.tensor(lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_: List[Any] =True
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : int=None ) -> Any:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
SCREAMING_SNAKE_CASE_: Optional[int] =False
return x * self.a + self.b
def __magic_name__ ( accelerator , batch_size = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_: Optional[Any] =AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE_: Optional[int] ={"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
    SCREAMING_SNAKE_CASE_: Any =load_dataset("""csv""" , data_files=data_files )
SCREAMING_SNAKE_CASE_: Any =datasets["""train"""].unique("""label""" )
    SCREAMING_SNAKE_CASE_: List[Any] ={v: i for i, v in enumerate(label_list )}
    def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
        SCREAMING_SNAKE_CASE_: Dict =tokenizer(
            examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None , padding="""max_length""" )
if "label" in examples:
SCREAMING_SNAKE_CASE_: Optional[int] =[label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
    SCREAMING_SNAKE_CASE_: List[Any] =datasets.map(
        tokenize_function , batched=True , remove_columns=["""sentence1""", """sentence2""", """label"""] , )
    def collate_fn(examples ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
        return tokenizer.pad(examples , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
    SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    SCREAMING_SNAKE_CASE_: Dict =DataLoader(tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
return train_dataloader, eval_dataloader
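# Hypothetical usage of the dataloader builder above (the dump renames it to
# __magic_name__, so `get_dataloaders` and the model name are assumptions):
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16)
#   model, train_dl, eval_dl = accelerator.prepare(model, train_dl, eval_dl)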
| 706
|
"""simple docstring"""
from math import pi
def __magic_name__ ( radius , angle ):
return 2 * pi * radius * (angle / 360)
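# Worked example: arc_length(90, 10) == 2 * pi * 10 * (90 / 360) == 5 * pi,
# roughly 15.708, i.e. a quarter of the circle's circumference.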
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
| 36
| 0
|
"""simple docstring"""
def __magic_name__ ( input_a , input_b ):
    return int((input_a, input_b).count(0 ) == 0 )
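# The tuple trick above yields 1 only when neither input is 0, matching the
# AND truth table: (0, 0) -> 0, (0, 1) -> 0, (1, 0) -> 0, (1, 1) -> 1.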
def __magic_name__ ( ):
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 707
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict =FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Union[str, Any] ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Any =jax.device_count()
SCREAMING_SNAKE_CASE_: Dict =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: Dict =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Dict =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int ="""stabilityai/stable-diffusion-2"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder="""scheduler""" )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , revision="""bf16""" , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE_: Optional[int] =scheduler_params
SCREAMING_SNAKE_CASE_: Tuple ="""A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE_: Union[str, Any] =jax.device_count()
SCREAMING_SNAKE_CASE_: Optional[Any] =num_samples * [prompt]
SCREAMING_SNAKE_CASE_: List[Any] =sd_pipe.prepare_inputs(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple =replicate(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] =shard(lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Any =jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE_: Any =jax.random.split(lowerCAmelCase , jax.device_count() )
SCREAMING_SNAKE_CASE_: Tuple =sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
SCREAMING_SNAKE_CASE_: str =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE_: Any =images[0, 253:256, 253:256, -1]
SCREAMING_SNAKE_CASE_: Optional[Any] =jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE_: Optional[int] =jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
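    # Note on the sharding pattern above: the prompt batch is replicated across
    # jax.device_count() devices and executed per device, so the raw output has
    # shape (num_devices, 1, 768, 768, 3) before the reshape flattens the
    # device axis into the batch dimension.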
| 36
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
_UpperCAmelCase = False
class a ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
SCREAMING_SNAKE_CASE_: Optional[int] =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_: Dict =pipe(
image=lowerCAmelCase , generator=lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
SCREAMING_SNAKE_CASE_: int =image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE_: Tuple =np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 708
|
"""simple docstring"""
def __magic_name__ ( n = 200_0000 ):
SCREAMING_SNAKE_CASE_: List[Any] =[0 for i in range(n + 1 )]
SCREAMING_SNAKE_CASE_: Union[str, Any] =1
SCREAMING_SNAKE_CASE_: Optional[Any] =1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
            for j in range(i * i , n + 1 , i ):
SCREAMING_SNAKE_CASE_: Optional[int] =1
SCREAMING_SNAKE_CASE_: Dict =0
    for i in range(n ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
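# Worked example: with the sieve above, solution(10) marks 4, 6, 8, 9, 10 as
# composite and sums the primes below 10: 2 + 3 + 5 + 7 == 17.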
if __name__ == "__main__":
print(f"""{solution() = }""")
| 36
| 0
|