code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
"""Return the complementary strand of a DNA sequence."""

import re


def dna(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G).

    :param dna: strand consisting only of the bases A, T, C, G
    :return: the complementary strand
    :raises ValueError: if the strand contains any other character
    """
    # Every character must be one of A/T/C/G, otherwise the strand is invalid.
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    # One C-level pass mapping each base to its complement.
    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging a_ : List[Any] = logging.get_logger(__name__) def a_ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : int=False ) -> List[str]: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: lowerCamelCase_ =os.path.abspath(__snake_case ) logger.info(F'''Loading PyTorch weights from {pt_path}''' ) lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' ) logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' ) lowerCamelCase_ =convert_pytorch_state_dict_to_flax(__snake_case , __snake_case ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files lowerCamelCase_ =convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case ) return flax_state_dict def a_ ( __snake_case : Tuple[str] , __snake_case : np.ndarray , __snake_case : Dict[str, jnp.ndarray] , __snake_case : str , ) -> (Tuple[str], np.ndarray): """simple docstring""" def is_key_or_prefix_key_in_dict(__snake_case : Tuple[str] ) -> bool: return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0 # layer norm lowerCamelCase_ =pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean lowerCamelCase_ =pt_tuple_key[:-1] + ('''mean''',) if 
pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var lowerCamelCase_ =pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # embedding lowerCamelCase_ =pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ): lowerCamelCase_ =pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ): lowerCamelCase_ =pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase_ =pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase_ =pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 lowerCamelCase_ =None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): lowerCamelCase_ =pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): lowerCamelCase_ =pt_tuple_key[-2] + '''_v''' if name is not None: lowerCamelCase_ =pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def a_ ( __snake_case : Union[str, Any] , __snake_case : str ) -> str: """simple docstring""" # convert pytorch tensor to numpy lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase_ 
=flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: lowerCamelCase_ =flax_model.params['''params'''] else: lowerCamelCase_ =flax_model.params lowerCamelCase_ =flatten_dict(__snake_case ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase_ =flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(__snake_case ) lowerCamelCase_ ={} lowerCamelCase_ =(model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowerCamelCase_ =(model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase_ =tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowerCamelCase_ =pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ =pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor( __snake_case , __snake_case , __snake_case , __snake_case ) # add model prefix if necessary lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__snake_case , __snake_case ) continue # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) else: # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) return unflatten_dict(__snake_case ) def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Optional[Any]: """simple docstring""" import torch # Load the index lowerCamelCase_ ={} for shard_file in shard_filenames: # load using msgpack utils lowerCamelCase_ =torch.load(__snake_case ) lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase_ =flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase_ =flax_model.params['''params'''] lowerCamelCase_ =flatten_dict(__snake_case ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: lowerCamelCase_ =flax_model.params lowerCamelCase_ =flatten_dict(__snake_case ) lowerCamelCase_ =(model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowerCamelCase_ =(model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase_ =tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowerCamelCase_ =pt_tuple_key[0] == model_prefix if 
load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ =pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor( __snake_case , __snake_case , __snake_case , __snake_case ) # add model prefix if necessary lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue if "var" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__snake_case , __snake_case ) continue # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) else: # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) return unflatten_dict(__snake_case ) def a_ ( __snake_case : List[str] , __snake_case : Dict ) -> str: """simple docstring""" lowerCamelCase_ =os.path.abspath(__snake_case ) logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' ) # import correct flax class lowerCamelCase_ =getattr(__snake_case , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(__snake_case , '''rb''' ) as state_f: try: lowerCamelCase_ =from_bytes(__snake_case , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. 
''' ) return load_flax_weights_in_pytorch_model(__snake_case , __snake_case ) def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[int]: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights lowerCamelCase_ =flatten_dict(jax.tree_util.tree_map(lambda __snake_case : x.dtype == jnp.bfloataa , __snake_case ) ).values() if any(__snake_case ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) lowerCamelCase_ =jax.tree_util.tree_map( lambda __snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __snake_case ) lowerCamelCase_ =flatten_dict(__snake_case ) lowerCamelCase_ =pt_model.state_dict() lowerCamelCase_ =(pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) lowerCamelCase_ =(pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys lowerCamelCase_ =[] lowerCamelCase_ =set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCamelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix lowerCamelCase_ ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ 
=flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict: # conv layer lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) lowerCamelCase_ =jnp.transpose(__snake_case , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict: # linear layer lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) lowerCamelCase_ =flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: lowerCamelCase_ ='''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: lowerCamelCase_ ='''.'''.join(__snake_case ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. 
lowerCamelCase_ ={} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: lowerCamelCase_ =key.split('''.''' ) lowerCamelCase_ =None if key_components[-3::2] == ["parametrizations", "original0"]: lowerCamelCase_ =key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: lowerCamelCase_ =key_components[-2] + '''_v''' if name is not None: lowerCamelCase_ =key_components[:-3] + [name] lowerCamelCase_ ='''.'''.join(__snake_case ) lowerCamelCase_ =key if flax_key in special_pt_names: lowerCamelCase_ =special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ''' F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict lowerCamelCase_ =np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor lowerCamelCase_ =torch.from_numpy(__snake_case ) # remove from missing keys missing_keys.remove(__snake_case ) else: # weight is not expected by PyTorch model unexpected_keys.append(__snake_case ) pt_model.load_state_dict(__snake_case ) # re-transform missing_keys to list lowerCamelCase_ =list(__snake_case ) if len(__snake_case ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' ''' to be exactly identical (e.g. 
initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' ) if len(__snake_case ) > 0: logger.warning( F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' ''' use it for predictions and inference.''' ) else: logger.warning( F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n''' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' ) return pt_model
676
1
"""Generate all permutations of a list with Heap's algorithm."""


def heaps(arr: list) -> list:
    """Return a list of tuples: every permutation of *arr*.

    Uses Heap's algorithm, which produces each successive permutation
    by a single swap. The input list is mutated during generation but
    ends in its original order for even-length inputs per the algorithm.
    """
    if len(arr) <= 1:
        # Zero or one element: exactly one (possibly empty) permutation.
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list) -> None:
        # Record a snapshot once the prefix length reaches 1.
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap element i with the last
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd: swap the first element with the last
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
676
"""Interleave the characters of two strings."""


def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Return the characters of the two strings alternately merged.

    Characters are taken one at a time from each string in turn; once the
    shorter string is exhausted, the remainder of the longer string is
    appended unchanged.
    """
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    # Iterate up to the longer length so the tail of the longer string is kept.
    abs_length = first_str_length if first_str_length > second_str_length else second_str_length
    output_list: list[str] = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
676
1
"""Weighted percentual-proximity scoring of tabular data columns."""


def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the source rows into one list per column (attribute)."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            # Grow the column list lazily the first time a column index is seen.
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(
    data_lists: list[list[float]], weights: list[int]
) -> list[list[float]]:
    """Min-max normalise every column according to its weight.

    weight == 0: lower is better -> score is 1 - normalised value.
    weight == 1: higher is better -> score is the normalised value.

    :raises ValueError: for any weight other than 0 or 1
    """
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        if weight == 0:
            # For weight 0 the score is 1 - actual normalised value.
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    # Constant column: every value is "best".
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores into one final score per row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(
    source_data: list[list[float]], weights: list[int]
) -> list[list[float]]:
    """Append a combined proximity score to each row of *source_data*.

    :param source_data: rows of numeric attributes
    :param weights: one 0/1 weight per column (0 = minimise, 1 = maximise)
    :return: *source_data* with the final score appended to each row (mutated in place)
    """
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # Append scores to the original rows.
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
676
"""Lazy package init for the TimmBackbone model."""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Mapping of submodule name -> public names, consumed by _LazyModule below.
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: without it only the configuration is exposed.
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]


if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
1
"""Render benchmark-result JSON as a collapsible Markdown table."""

import json
import sys


def format_json_to_md(input_json_file, output_md_file) -> None:
    """Read benchmark results from *input_json_file* and write Markdown.

    The JSON maps benchmark path -> metric name -> {"new", "old", "diff"}
    values; the output is one table per benchmark wrapped in a
    ``<details>`` block.
    """
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        # Only the file name, not the full path, goes into the heading.
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            # "old" and "diff" are optional (first run has no baseline).
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
676
"""Levenshtein edit distance, top-down with memoization."""

import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Return the minimum number of insert/delete/replace edits to turn
    *word1* into *word2*.
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # First word exhausted: insert all remaining chars of the second word.
        if index1 >= len_word1:
            return len_word2 - index2
        # Second word exhausted: delete all remaining chars of the first word.
        if index2 >= len_word2:
            return len_word1 - index1
        # 1 if the current letters differ (replacement needed), else 0.
        diff = int(word1[index1] != word2[index2])
        return min(
            1 + min_distance(index1 + 1, index2),      # delete from word1
            1 + min_distance(index1, index2 + 1),      # insert into word1
            diff + min_distance(index1 + 1, index2 + 1),  # match / replace
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
"""Lazy package init for the X-MOD model."""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Mapping of submodule name -> public names, consumed by _LazyModule below.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: without it only the configuration is exposed.
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
"""Check whether a number is automorphic (its square ends in itself)."""


def is_automorphic_number(number: int) -> bool:
    """Return True if *number*'s square ends with *number* itself.

    Examples: 5 -> 25, 6 -> 36, 25 -> 625, 76 -> 5776.

    :raises TypeError: if *number* is not an integer
    :return: False for negative numbers, True/False otherwise
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number and its square, one by one.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
'''simple docstring''' from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[str] =['image_processor'] lowercase : Union[str, Any] ='SamImageProcessor' def __init__( self, lowerCAmelCase ): """simple docstring""" super().__init__(lowerCAmelCase ) lowerCamelCase_ =self.image_processor lowerCamelCase_ =-10 lowerCamelCase_ =self.image_processor.size['''longest_edge'''] def __call__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =self.image_processor( lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase, ) # pop arguments that are not used in the foward but used nevertheless lowerCamelCase_ =encoding_image_processor['''original_sizes'''] if hasattr(lowerCAmelCase, '''numpy''' ): # Checks if Torch or TF tensor lowerCamelCase_ =original_sizes.numpy() lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =self._check_and_preprocess_points( input_points=lowerCAmelCase, input_labels=lowerCAmelCase, input_boxes=lowerCAmelCase, ) lowerCamelCase_ =self._normalize_and_convert( lowerCAmelCase, lowerCAmelCase, input_points=lowerCAmelCase, input_labels=lowerCAmelCase, input_boxes=lowerCAmelCase, return_tensors=lowerCAmelCase, ) return encoding_image_processor def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase="pt", ): """simple docstring""" if input_points is not None: if len(lowerCAmelCase ) != len(lowerCAmelCase ): lowerCamelCase_ =[ self._normalize_coordinates(self.target_size, lowerCAmelCase, original_sizes[0] ) for point in 
input_points ] else: lowerCamelCase_ =[ self._normalize_coordinates(self.target_size, lowerCAmelCase, lowerCAmelCase ) for point, original_size in zip(lowerCAmelCase, lowerCAmelCase ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: lowerCamelCase_, lowerCamelCase_ =self._pad_points_and_labels(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =np.array(lowerCAmelCase ) if input_labels is not None: lowerCamelCase_ =np.array(lowerCAmelCase ) if input_boxes is not None: if len(lowerCAmelCase ) != len(lowerCAmelCase ): lowerCamelCase_ =[ self._normalize_coordinates(self.target_size, lowerCAmelCase, original_sizes[0], is_bounding_box=lowerCAmelCase ) for box in input_boxes ] else: lowerCamelCase_ =[ self._normalize_coordinates(self.target_size, lowerCAmelCase, lowerCAmelCase, is_bounding_box=lowerCAmelCase ) for box, original_size in zip(lowerCAmelCase, lowerCAmelCase ) ] lowerCamelCase_ =np.array(lowerCAmelCase ) if input_boxes is not None: if return_tensors == "pt": lowerCamelCase_ =torch.from_numpy(lowerCAmelCase ) # boxes batch size of 1 by default lowerCamelCase_ =input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": lowerCamelCase_ =tf.convert_to_tensor(lowerCAmelCase ) # boxes batch size of 1 by default lowerCamelCase_ =tf.expand_dims(lowerCAmelCase, 1 ) if len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({'''input_boxes''': input_boxes} ) if input_points is not None: if return_tensors == "pt": lowerCamelCase_ =torch.from_numpy(lowerCAmelCase ) # point batch size of 1 by default lowerCamelCase_ =input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points elif return_tensors == "tf": lowerCamelCase_ =tf.convert_to_tensor(lowerCAmelCase ) # point batch size of 1 by default lowerCamelCase_ =tf.expand_dims(lowerCAmelCase, 1 ) if len(input_points.shape ) != 4 else 
input_points encoding_image_processor.update({'''input_points''': input_points} ) if input_labels is not None: if return_tensors == "pt": lowerCamelCase_ =torch.from_numpy(lowerCAmelCase ) # point batch size of 1 by default lowerCamelCase_ =input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": lowerCamelCase_ =tf.convert_to_tensor(lowerCAmelCase ) # point batch size of 1 by default lowerCamelCase_ =tf.expand_dims(lowerCAmelCase, 1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({'''input_labels''': input_labels} ) return encoding_image_processor def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =max([point.shape[0] for point in input_points] ) lowerCamelCase_ =[] for i, point in enumerate(lowerCAmelCase ): if point.shape[0] != expected_nb_points: lowerCamelCase_ =np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value], axis=0 ) lowerCamelCase_ =np.append(input_labels[i], [self.point_pad_value] ) processed_input_points.append(lowerCAmelCase ) lowerCamelCase_ =processed_input_points return input_points, input_labels def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =original_size lowerCamelCase_, lowerCamelCase_ =self.image_processor._get_preprocess_shape(lowerCAmelCase, longest_edge=lowerCAmelCase ) lowerCamelCase_ =deepcopy(lowerCAmelCase ).astype(lowerCAmelCase ) if is_bounding_box: lowerCamelCase_ =coords.reshape(-1, 2, 2 ) lowerCamelCase_ =coords[..., 0] * (new_w / old_w) lowerCamelCase_ =coords[..., 1] * (new_h / old_h) if is_bounding_box: lowerCamelCase_ =coords.reshape(-1, 4 ) return coords def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, ): """simple docstring""" if input_points is not None: if hasattr(lowerCAmelCase, '''numpy''' ): # Checks 
for TF or Torch tensor lowerCamelCase_ =input_points.numpy().tolist() if not isinstance(lowerCAmelCase, lowerCAmelCase ) or not isinstance(input_points[0], lowerCAmelCase ): raise ValueError('''Input points must be a list of list of floating points.''' ) lowerCamelCase_ =[np.array(lowerCAmelCase ) for input_point in input_points] else: lowerCamelCase_ =None if input_labels is not None: if hasattr(lowerCAmelCase, '''numpy''' ): lowerCamelCase_ =input_labels.numpy().tolist() if not isinstance(lowerCAmelCase, lowerCAmelCase ) or not isinstance(input_labels[0], lowerCAmelCase ): raise ValueError('''Input labels must be a list of list integers.''' ) lowerCamelCase_ =[np.array(lowerCAmelCase ) for label in input_labels] else: lowerCamelCase_ =None if input_boxes is not None: if hasattr(lowerCAmelCase, '''numpy''' ): lowerCamelCase_ =input_boxes.numpy().tolist() if ( not isinstance(lowerCAmelCase, lowerCAmelCase ) or not isinstance(input_boxes[0], lowerCAmelCase ) or not isinstance(input_boxes[0][0], lowerCAmelCase ) ): raise ValueError('''Input boxes must be a list of list of list of floating points.''' ) lowerCamelCase_ =[np.array(lowerCAmelCase ).astype(np.floataa ) for box in input_boxes] else: lowerCamelCase_ =None return input_points, input_labels, input_boxes @property def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processor.model_input_names return list(dict.fromkeys(lowerCAmelCase ) ) def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return self.image_processor.post_process_masks(*lowerCAmelCase, **lowerCAmelCase )
676
'''simple docstring''' from __future__ import annotations a_ : int = list[list[int]] # assigning initial values to the grid a_ : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution a_ : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def a_ ( __snake_case : Matrix , __snake_case : int , __snake_case : int , __snake_case : int ) -> bool: """simple docstring""" for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def a_ ( __snake_case : Matrix ) -> tuple[int, int] | None: """simple docstring""" for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def a_ ( __snake_case : Matrix ) -> Matrix | None: """simple docstring""" if location := find_empty_location(__snake_case ): lowerCamelCase_, lowerCamelCase_ =location else: # If the location is ``None``, then the grid is solved. 
return grid for digit in range(1 , 10 ): if is_safe(__snake_case , __snake_case , __snake_case , __snake_case ): lowerCamelCase_ =digit if sudoku(__snake_case ) is not None: return grid lowerCamelCase_ =0 return None def a_ ( __snake_case : Matrix ) -> None: """simple docstring""" for row in grid: for cell in row: print(__snake_case , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("""\nExample grid:\n""" + """=""" * 20) print_solution(example_grid) print("""\nExample grid solution:""") a_ : Union[str, Any] = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("""Cannot find a solution.""")
676
1
'''simple docstring''' import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def a_ ( __snake_case : Dataset , __snake_case : Dict[str, str] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =args.log_outputs lowerCamelCase_ ='''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric lowerCamelCase_ =load_metric('''wer''' ) lowerCamelCase_ =load_metric('''cer''' ) # compute metrics lowerCamelCase_ =wer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) lowerCamelCase_ =cer.compute(references=result['''target'''] , predictions=result['''prediction'''] ) # print & log results lowerCamelCase_ =F'''WER: {wer_result}\nCER: {cer_result}''' print(__snake_case ) with open(F'''{dataset_id}_eval_results.txt''' , '''w''' ) as f: f.write(__snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: lowerCamelCase_ =F'''log_{dataset_id}_predictions.txt''' lowerCamelCase_ =F'''log_{dataset_id}_targets.txt''' with open(__snake_case , '''w''' ) as p, open(__snake_case , '''w''' ) as t: # mapping function to write output def write_to_file(__snake_case : Union[str, Any] , __snake_case : Tuple ): p.write(F'''{i}''' + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(F'''{i}''' + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(__snake_case , with_indices=__snake_case ) def a_ ( __snake_case : str ) -> str: """simple docstring""" lowerCamelCase_ ='''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training lowerCamelCase_ =re.sub(__snake_case , '''''' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
lowerCamelCase_ =['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: lowerCamelCase_ =''' '''.join(text.split(__snake_case ) ) return text def a_ ( __snake_case : Dict ) -> Optional[int]: """simple docstring""" # load dataset lowerCamelCase_ =load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor lowerCamelCase_ =AutoFeatureExtractor.from_pretrained(args.model_id ) lowerCamelCase_ =feature_extractor.sampling_rate # resample audio lowerCamelCase_ =dataset.cast_column('''audio''' , Audio(sampling_rate=__snake_case ) ) # load eval pipeline if args.device is None: lowerCamelCase_ =0 if torch.cuda.is_available() else -1 lowerCamelCase_ =pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(__snake_case : Tuple ): lowerCamelCase_ =asr( batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) lowerCamelCase_ =prediction['''text'''] lowerCamelCase_ =normalize_text(batch['''sentence'''] ) return batch # run inference on all examples lowerCamelCase_ =dataset.map(__snake_case , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(__snake_case , __snake_case ) if __name__ == "__main__": a_ : List[Any] = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. 
*E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) a_ : Optional[Any] = parser.parse_args() main(args)
676
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Union[str, Any] = logging.get_logger(__name__) a_ : Tuple = { """huggingface/informer-tourism-monthly""": ( """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json""" ), # See all Informer models at https://huggingface.co/models?filter=informer } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Union[str, Any] ='informer' lowercase : Union[str, Any] ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "student_t", lowerCAmelCase = "nll", lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = "mean", lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 64, lowerCAmelCase = 32, lowerCAmelCase = 32, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = True, lowerCAmelCase = "gelu", lowerCAmelCase = 0.0_5, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 100, lowerCAmelCase = 0.0_2, lowerCAmelCase=True, lowerCAmelCase = "prob", lowerCAmelCase = 5, lowerCAmelCase = True, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =prediction_length lowerCamelCase_ =context_length or prediction_length lowerCamelCase_ =distribution_output lowerCamelCase_ =loss lowerCamelCase_ =input_size lowerCamelCase_ =num_time_features lowerCamelCase_ =lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] lowerCamelCase_ =scaling lowerCamelCase_ =num_dynamic_real_features lowerCamelCase_ =num_static_real_features lowerCamelCase_ =num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if 
len(lowerCAmelCase ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) lowerCamelCase_ =cardinality else: lowerCamelCase_ =[0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(lowerCAmelCase ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) lowerCamelCase_ =embedding_dimension else: lowerCamelCase_ =[min(50, (cat + 1) // 2 ) for cat in self.cardinality] lowerCamelCase_ =num_parallel_samples # Transformer architecture configuration lowerCamelCase_ =input_size * len(self.lags_sequence ) + self._number_of_features lowerCamelCase_ =d_model lowerCamelCase_ =encoder_attention_heads lowerCamelCase_ =decoder_attention_heads lowerCamelCase_ =encoder_ffn_dim lowerCamelCase_ =decoder_ffn_dim lowerCamelCase_ =encoder_layers lowerCamelCase_ =decoder_layers lowerCamelCase_ =dropout lowerCamelCase_ =attention_dropout lowerCamelCase_ =activation_dropout lowerCamelCase_ =encoder_layerdrop lowerCamelCase_ =decoder_layerdrop lowerCamelCase_ =activation_function lowerCamelCase_ =init_std lowerCamelCase_ =use_cache # Informer lowerCamelCase_ =attention_type lowerCamelCase_ =sampling_factor lowerCamelCase_ =distil super().__init__(is_encoder_decoder=lowerCAmelCase, **lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
676
1
'''simple docstring''' import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): @register_to_config def __init__( self, *, lowerCAmelCase = 4, lowerCAmelCase = 768, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" super().__init__() lowerCamelCase_ =nn.Parameter(torch.zeros(lowerCAmelCase ) ) # parameters for additional clip time embeddings lowerCamelCase_ =nn.Linear(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =nn.Linear(lowerCAmelCase, lowerCAmelCase ) # parameters for encoder hidden states lowerCamelCase_ =clip_extra_context_tokens lowerCamelCase_ =nn.Linear( lowerCAmelCase, self.clip_extra_context_tokens * cross_attention_dim ) lowerCamelCase_ =nn.Linear(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =nn.LayerNorm(lowerCAmelCase ) def lowercase__ ( self, *, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings lowerCamelCase_ =image_embeddings.shape[0] lowerCamelCase_ =self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) lowerCamelCase_ =classifier_free_guidance_embeddings.expand( lowerCAmelCase, -1 ) lowerCamelCase_ =torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] lowerCamelCase_ =prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... lowerCamelCase_ =self.embedding_proj(lowerCAmelCase ) lowerCamelCase_ =self.clip_image_embeddings_project_to_time_embeddings(lowerCAmelCase ) lowerCamelCase_ =time_projected_image_embeddings + time_projected_prompt_embeds # ... 
and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" lowerCamelCase_ =self.clip_extra_context_tokens_proj(lowerCAmelCase ) lowerCamelCase_ =clip_extra_context_tokens.reshape(lowerCAmelCase, -1, self.clip_extra_context_tokens ) lowerCamelCase_ =clip_extra_context_tokens.permute(0, 2, 1 ) lowerCamelCase_ =self.encoder_hidden_states_proj(lowerCAmelCase ) lowerCamelCase_ =self.text_encoder_hidden_states_norm(lowerCAmelCase ) lowerCamelCase_ =torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
676
'''simple docstring''' from __future__ import annotations def a_ ( __snake_case : int ) -> list[int]: """simple docstring""" lowerCamelCase_ =[True] * limit lowerCamelCase_ =False lowerCamelCase_ =False lowerCamelCase_ =True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): lowerCamelCase_ =i * 2 while index < limit: lowerCamelCase_ =False lowerCamelCase_ =index + i lowerCamelCase_ =[2] for i in range(3 , __snake_case , 2 ): if is_prime[i]: primes.append(__snake_case ) return primes def a_ ( __snake_case : int = 100_0000 ) -> int: """simple docstring""" lowerCamelCase_ =prime_sieve(__snake_case ) lowerCamelCase_ =0 lowerCamelCase_ =0 for i in range(len(__snake_case ) ): for j in range(i + length , len(__snake_case ) ): lowerCamelCase_ =sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: lowerCamelCase_ =j - i lowerCamelCase_ =sol return largest if __name__ == "__main__": print(F"""{solution() = }""")
676
1
'''simple docstring''' def a_ ( __snake_case : list[int] , __snake_case : int ) -> bool: """simple docstring""" lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =[[False] * (required_sum + 1) for _ in range(arr_len + 1 )] # for each arr value, a sum of zero(0) can be formed by not taking any element # hence True/1 for i in range(arr_len + 1 ): lowerCamelCase_ =True # sum is not zero and set is empty then false for i in range(1 , required_sum + 1 ): lowerCamelCase_ =False for i in range(1 , arr_len + 1 ): for j in range(1 , required_sum + 1 ): if arr[i - 1] > j: lowerCamelCase_ =subset[i - 1][j] if arr[i - 1] <= j: lowerCamelCase_ =subset[i - 1][j] or subset[i - 1][j - arr[i - 1]] return subset[arr_len][required_sum] if __name__ == "__main__": import doctest doctest.testmod()
676
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" super().__init__() # make sure scheduler can always be converted to DDIM lowerCamelCase_ =DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=lowerCAmelCase, scheduler=lowerCAmelCase ) @torch.no_grad() def __call__( self, lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = 0.0, lowerCAmelCase = 50, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, ): """simple docstring""" if isinstance(self.unet.config.sample_size, lowerCAmelCase ): lowerCamelCase_ =( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: lowerCamelCase_ =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowerCAmelCase )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCamelCase_ =randn_tensor(lowerCAmelCase, generator=lowerCAmelCase, device=self.device, dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(lowerCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowerCamelCase_ =self.unet(lowerCAmelCase, lowerCAmelCase ).sample # 2. 
predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCamelCase_ =self.scheduler.step( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, eta=lowerCAmelCase, use_clipped_model_output=lowerCAmelCase, generator=lowerCAmelCase ).prev_sample lowerCamelCase_ =(image / 2 + 0.5).clamp(0, 1 ) lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": lowerCamelCase_ =self.numpy_to_pil(lowerCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase )
676
1
'''simple docstring''' from .imports import is_rich_available if is_rich_available(): from rich.traceback import install install(show_locals=False) else: raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
676
'''simple docstring''' from maths.prime_check import is_prime def a_ ( __snake_case : int ) -> int: """simple docstring""" if not isinstance(__snake_case , __snake_case ): lowerCamelCase_ =F'''Input value of [number={number}] must be an integer''' raise TypeError(__snake_case ) if is_prime(__snake_case ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
676
1
'''simple docstring''' from maths.prime_check import is_prime def a_ ( __snake_case : int ) -> int: """simple docstring""" if not isinstance(__snake_case , __snake_case ): lowerCamelCase_ =F'''Input value of [number={number}] must be an integer''' raise TypeError(__snake_case ) if is_prime(__snake_case ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
676
'''simple docstring''' # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class __UpperCamelCase ( lowerCamelCase__ ): lowercase : torch.FloatTensor lowercase : torch.FloatTensor class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): lowercase : Tuple =1 @register_to_config def __init__( self, lowerCAmelCase = 2_000, lowerCAmelCase = 0.1_5, lowerCAmelCase = 0.0_1, lowerCAmelCase = 1_3_4_8.0, lowerCAmelCase = 1e-5, lowerCAmelCase = 1, ): """simple docstring""" lowerCamelCase_ =sigma_max # setable values lowerCamelCase_ =None self.set_sigmas(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" return sample def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None ): """simple docstring""" lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps lowerCamelCase_ =torch.linspace(1, lowerCAmelCase, lowerCAmelCase, device=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None ): """simple docstring""" lowerCamelCase_ =sigma_min if sigma_min is not None else self.config.sigma_min lowerCamelCase_ =sigma_max if sigma_max is not None else self.config.sigma_max lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) lowerCamelCase_ =torch.exp(torch.linspace(math.log(lowerCAmelCase ), math.log(lowerCAmelCase ), 
lowerCAmelCase ) ) lowerCamelCase_ =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" return torch.where( timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ): """simple docstring""" if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) lowerCamelCase_ =timestep * torch.ones( sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) lowerCamelCase_ =(timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda lowerCamelCase_ =timesteps.to(self.discrete_sigmas.device ) lowerCamelCase_ =self.discrete_sigmas[timesteps].to(sample.device ) lowerCamelCase_ =self.get_adjacent_sigma(lowerCAmelCase, lowerCAmelCase ).to(sample.device ) lowerCamelCase_ =torch.zeros_like(lowerCAmelCase ) lowerCamelCase_ =(sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods lowerCamelCase_ =diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): lowerCamelCase_ =diffusion.unsqueeze(-1 ) lowerCamelCase_ =drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of lowerCamelCase_ =randn_tensor( sample.shape, layout=sample.layout, generator=lowerCAmelCase, device=sample.device, dtype=sample.dtype ) lowerCamelCase_ =sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? 
lowerCamelCase_ =prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=lowerCAmelCase, prev_sample_mean=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ): """simple docstring""" if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction lowerCamelCase_ =randn_tensor(sample.shape, layout=sample.layout, generator=lowerCAmelCase ).to(sample.device ) # compute step size from the model_output, the noise, and the snr lowerCamelCase_ =torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean() lowerCamelCase_ =torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean() lowerCamelCase_ =(self.config.snr * noise_norm / grad_norm) ** 2 * 2 lowerCamelCase_ =step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term lowerCamelCase_ =step_size.flatten() while len(step_size.shape ) < len(sample.shape ): lowerCamelCase_ =step_size.unsqueeze(-1 ) lowerCamelCase_ =sample + step_size * model_output lowerCamelCase_ =prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =timesteps.to(original_samples.device ) lowerCamelCase_ =self.discrete_sigmas.to(original_samples.device )[timesteps] lowerCamelCase_ =( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None] ) lowerCamelCase_ =noise + 
original_samples return noisy_samples def __len__( self ): """simple docstring""" return self.config.num_train_timesteps
676
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available, is_vision_available, ) a_ : List[str] = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Tuple = ["""BeitFeatureExtractor"""] a_ : Any = ["""BeitImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[int] = [ """BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BeitForImageClassification""", """BeitForMaskedImageModeling""", """BeitForSemanticSegmentation""", """BeitModel""", """BeitPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : str = [ """FlaxBeitForImageClassification""", """FlaxBeitForMaskedImageModeling""", """FlaxBeitModel""", """FlaxBeitPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_beit import BeitFeatureExtractor from .image_processing_beit import BeitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_beit import ( BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_beit import ( FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, 
FlaxBeitModel, FlaxBeitPreTrainedModel, ) else: import sys a_ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
676
'''simple docstring''' def a_ ( __snake_case : int , __snake_case : int ) -> str: """simple docstring""" if not isinstance(__snake_case , __snake_case ): raise ValueError('''iterations must be defined as integers''' ) if not isinstance(__snake_case , __snake_case ) or not number >= 1: raise ValueError( '''starting number must be and integer and be more than 0''' ) if not iterations >= 1: raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' ) lowerCamelCase_ ='''''' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__snake_case ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
676
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_tf_available, is_torch_available, ) a_ : str = { """configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""], """processing_speech_to_text""": ["""Speech2TextProcessor"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Tuple = ["""Speech2TextTokenizer"""] try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Union[str, Any] = ["""Speech2TextFeatureExtractor"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[int] = [ """TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFSpeech2TextForConditionalGeneration""", """TFSpeech2TextModel""", """TFSpeech2TextPreTrainedModel""", ] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : int = [ """SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Speech2TextForConditionalGeneration""", """Speech2TextModel""", """Speech2TextPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig from .processing_speech_to_text import SpeechaTextProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speech_to_text import SpeechaTextTokenizer try: if not is_speech_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor try: if not is_tf_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_speech_to_text import ( TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, TFSpeechaTextForConditionalGeneration, TFSpeechaTextModel, TFSpeechaTextPreTrainedModel, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechaTextForConditionalGeneration, SpeechaTextModel, SpeechaTextPreTrainedModel, ) else: import sys a_ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
676
'''simple docstring''' from typing import List import numpy as np def a_ ( __snake_case : dict ) -> int: """simple docstring""" lowerCamelCase_ ={key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) lowerCamelCase_ =max(lists_lengths.values() , default=0 ) return max(1 , __snake_case ) def a_ ( __snake_case : int , __snake_case : int ) -> List[range]: """simple docstring""" lowerCamelCase_ =[] for group_idx in range(__snake_case ): lowerCamelCase_ =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowerCamelCase_ =shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowerCamelCase_ =range(__snake_case , start + num_shards_to_add ) shards_indices_per_group.append(__snake_case ) return shards_indices_per_group def a_ ( __snake_case : dict , __snake_case : int ) -> List[dict]: """simple docstring""" lowerCamelCase_ =_number_of_shards_in_gen_kwargs(__snake_case ) if num_shards == 1: return [dict(__snake_case )] else: lowerCamelCase_ =_distribute_shards(num_shards=__snake_case , max_num_jobs=__snake_case ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__snake_case , __snake_case ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__snake_case ) ) ] def a_ ( __snake_case : List[dict] ) -> dict: """simple docstring""" 
return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __snake_case ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def a_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> dict: """simple docstring""" lowerCamelCase_ ={len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )} lowerCamelCase_ ={} for size in list_sizes: lowerCamelCase_ =list(range(__snake_case ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowerCamelCase_ =dict(__snake_case ) for key, value in shuffled_kwargs.items(): if isinstance(__snake_case , __snake_case ): lowerCamelCase_ =[value[i] for i in indices_per_size[len(__snake_case )]] return shuffled_kwargs
676
1
'''simple docstring'''

import warnings

from .generation import TFGenerationMixin


class __UpperCamelCase(TFGenerationMixin):
    """Backward-compatibility shim: re-exports TFGenerationMixin from its old location.

    Fix: the base class and the warning category were an undefined name
    (`lowerCamelCase__`); the import above and the deprecation text show they
    must be `TFGenerationMixin` and `FutureWarning`.
    """

    # warning at import time
    warnings.warn(
        'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
        'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.',
        FutureWarning,
    )
676
'''simple docstring'''

import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer


logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
# The function bodies below log via `logger`, so the module logger must carry that name.
logger = logging.getLogger(__name__)


def a_():
    """Tokenize a raw text dump once and pickle the resulting token-id arrays.

    Reads `--file_path` line by line, encodes each line with the chosen tokenizer
    (wrapped in its bos/sep special tokens), and dumps the shuffled list of
    uint16/int32 arrays to `<dump_file>.<tokenizer_name>.pickle`.
    """
    parser = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).'
    )
    parser.add_argument('--file_path', type=str, default='data/dump.txt', help='The path to the data.')
    parser.add_argument('--tokenizer_type', type=str, default='bert', choices=['bert', 'roberta', 'gpt2'])
    parser.add_argument('--tokenizer_name', type=str, default='bert-base-uncased', help='The tokenizer to use.')
    parser.add_argument('--dump_file', type=str, default='data/dump', help='The dump file prefix.')
    args = parser.parse_args()

    logger.info(f'Loading Tokenizer ({args.tokenizer_name})')
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['cls_token']  # `[CLS]`
        sep = tokenizer.special_tokens_map['sep_token']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['cls_token']  # `<s>`
        sep = tokenizer.special_tokens_map['sep_token']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['bos_token']  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['eos_token']  # `<|endoftext|>`

    logger.info(f'Loading text from {args.file_path}')
    with open(args.file_path, 'r', encoding='utf8') as fp:
        data = fp.readlines()

    logger.info('Start encoding')
    logger.info(f'{len(data)} examples to process.')

    rslt = []
    count = 0  # renamed from `iter` to avoid shadowing the builtin
    interval = 10000
    start = time.time()
    for text in data:
        # Wrap each line in the tokenizer's own special tokens, so do not add them again.
        text = f'{bos} {text.strip()} {sep}'
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        count += 1
        if count % interval == 0:
            end = time.time()
            logger.info(f'{count} examples processed. - {(end-start):.2f}s/{interval}expl')
            start = time.time()
    logger.info('Finished binarization')
    logger.info(f'{len(data)} examples processed.')

    dp_file = f'{args.dump_file}.{args.tokenizer_name}.pickle'
    vocab_size = tokenizer.vocab_size
    # Fix: `np.uintaa`/`np.intaa` do not exist; ids fit in uint16 iff vocab_size < 2**16.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f'Dump to {dp_file}')
    with open(dp_file, 'wb') as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    # Fix: the entry point is named `a_`, but the guard previously called the
    # undefined name `main()`.
    a_()
676
1
'''simple docstring'''

from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize('repo_id', ['canonical_dataset_name', 'org-name/dataset-name'])
@pytest.mark.parametrize('path', ['filename.csv', 'filename with blanks.csv'])
@pytest.mark.parametrize('revision', [None, 'v2'])
def a_(repo_id, path, revision):
    """Check that hf_hub_url builds the expected resolve URL, URL-quoting the path.

    Fix: the parameters were duplicate `__snake_case` names (a SyntaxError) and did
    not match the `parametrize` ids, which pytest injects by name; annotations also
    referenced unimported `typing` names, so they are dropped.
    """
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
676
'''simple docstring'''

import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}


class __UpperCamelCase(PretrainedConfig):
    """Configuration for the MVP encoder-decoder model (defaults follow RUCAIBox/mvp).

    Fix: the base class was the undefined name `lowerCamelCase__` (the file imports
    `PretrainedConfig`), the three class attributes all collided on the name
    `lowercase` (PretrainedConfig machinery needs `model_type`,
    `keys_to_ignore_at_inference`, `attribute_map`), and every `__init__` parameter
    was the duplicate name `lowerCAmelCase` (a SyntaxError); the body's references
    determine the real parameter names.
    """

    model_type = 'mvp'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=50_267,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.0_2,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Legacy configs may carry `force_bos_token_to_be_generated`; translate it once.
        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
                '''The config can simply be saved and uploaded again to be fixed.'''
            )
676
1
'''simple docstring'''

import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer


logger = logging.get_logger(__name__)

# Fix: these four constants were all assigned to the same name `a_`, while the
# class below references them by their real names.
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
        """distilbert-base-uncased-distilled-squad""": (
            """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
        ),
        """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
        """distilbert-base-cased-distilled-squad""": (
            """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
        ),
        """distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
        """distilbert-base-multilingual-cased""": (
            """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
        """distilbert-base-uncased-distilled-squad""": (
            """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
        ),
        """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
        """distilbert-base-cased-distilled-squad""": (
            """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
        ),
        """distilbert-base-german-cased""": (
            """https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
        ),
        """distilbert-base-multilingual-cased""": (
            """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """distilbert-base-uncased""": 5_12,
    """distilbert-base-uncased-distilled-squad""": 5_12,
    """distilbert-base-cased""": 5_12,
    """distilbert-base-cased-distilled-squad""": 5_12,
    """distilbert-base-german-cased""": 5_12,
    """distilbert-base-multilingual-cased""": 5_12,
}

PRETRAINED_INIT_CONFIGURATION = {
    """distilbert-base-uncased""": {"""do_lower_case""": True},
    """distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
    """distilbert-base-cased""": {"""do_lower_case""": False},
    """distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
    """distilbert-base-german-cased""": {"""do_lower_case""": False},
    """distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}


class __UpperCamelCase(PreTrainedTokenizerFast):
    """Fast DistilBERT tokenizer backed by the Rust `tokenizers` library.

    Fix: the base class was the undefined `lowerCamelCase__` (the file imports
    `PreTrainedTokenizerFast`), the class attributes collided on `lowercase`
    (the tokenizer machinery needs their real names), and method parameters were
    duplicate `lowerCAmelCase` names (SyntaxError); the bodies' references to
    `token_ids_a`, `output`, `save_directory`, etc. determine the real names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized normalizer disagrees with the requested options,
        # rebuild it with the options passed to this constructor.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('''lowercase''', do_lower_case) != do_lower_case
            or normalizer_state.get('''strip_accents''', strip_accents) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('''type'''))
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` from one or two sequences."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a_pair:
            output += token_ids_a_pair + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """Return 0s over `[CLS] A [SEP]` and 1s over `B [SEP]` (sequence-pair mask)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_pair + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files to `save_directory`."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
676
'''simple docstring'''

import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

# Fix: constants were all assigned to `a_` while the class references the real names.
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """bert_for_seq_generation""": (
            """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""bert_for_seq_generation""": 5_12}


class __UpperCamelCase(PreTrainedTokenizer):
    """SentencePiece tokenizer for BertGeneration checkpoints.

    Fix: every method was named `lowercase__` (so later defs shadowed earlier
    ones) and parameters were duplicate `lowerCAmelCase` names; the
    `PreTrainedTokenizer` machinery requires the real hook names
    (`_tokenize`, `_convert_token_to_id`, `save_vocabulary`, ...), and
    `sp_model.encode` needs `out_type=str` to return string pieces.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Size of the SentencePiece vocabulary (without added tokens)."""
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Return token -> id mapping, including tokens added after loading."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize `text` into SentencePiece string pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Map a piece string to its vocabulary id."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Map a vocabulary id back to its piece string."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Join pieces back into text, decoding around special tokens separately."""
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
676
1
'''simple docstring'''

import torch

from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde

from .test_schedulers import SchedulerCommonTest


@require_torchsde
class __UpperCamelCase(SchedulerCommonTest):
    """Tests for DPMSolverSDEScheduler.

    Fix: the class attributes collided on `lowercase` and every method was
    named `lowercase__`, so `self.get_scheduler_config()` /
    `self.num_inference_steps` (which the bodies call) did not exist and only
    one test survived; real names restored. Numeric reference values are kept
    verbatim (underscored literals are valid Python).
    """

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config; keyword overrides are merged in."""
        config = {
            '''num_train_timesteps''': 1_100,
            '''beta_start''': 0.0_0_0_1,
            '''beta_end''': 0.0_2,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1], [0.0_0_0_2, 0.0_0_2, 0.0_2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Run a full denoising loop and compare against per-device reference sums."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 1_6_7.4_7_8_2_1_0_4_4_9_2_1_8_7_5) < 1e-2
            assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 1_7_1.5_9_3_5_2_1_1_1_8_1_6_4_0_6) < 1e-2
            assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2) < 1e-3
        else:
            assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2) < 1e-2
            assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Same loop as above with the v-prediction parameterization."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''')
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 1_2_4.7_7_1_4_9_2_0_0_4_3_9_4_5_3) < 1e-2
            assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 1_2_8.1_6_6_3_3_6_0_5_9_5_7_0_3) < 1e-2
            assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7) < 1e-3
        else:
            assert abs(result_sum.item() - 1_1_9.8_4_8_7_5_4_8_8_2_8_1_2_5) < 1e-2
            assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1) < 1e-3

    def test_full_loop_device(self):
        """Full loop with timesteps placed directly on the target device."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 1_6_7.4_6_9_5_7_3_9_7_4_6_0_9_3_8) < 1e-2
            assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 1_7_1.5_9_3_5_3_6_3_7_6_9_5_3_1_2) < 1e-2
            assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1) < 1e-3
        else:
            assert abs(result_sum.item() - 1_6_2.5_2_3_8_3_4_2_2_8_5_1_5_6_2) < 1e-2
            assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """Full loop with the Karras sigma schedule enabled."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 1_7_6.6_6_9_7_4_1_3_5_7_4_2_1_8_8) < 1e-2
            assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 1_7_7.6_3_6_5_3_5_6_4_4_5_3_1_2_5) < 1e-2
            assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1) < 1e-2
        else:
            assert abs(result_sum.item() - 1_7_0.3_1_3_5_2_2_3_3_8_8_6_7_2) < 1e-2
            assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1) < 1e-2
676
'''simple docstring'''

from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial with coefficients `poly` (lowest degree first) at `x`.

    Direct method: sum of c_i * x**i.
    """
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial at `x` using Horner's rule (one multiply per coefficient).

    Fix: both functions were named `a_`, while the `__main__` block below calls
    `evaluate_poly`/`horner`, and the demo tuple/point were both named `a_` while
    the calls reference `poly`/`x` — all undefined before this restoration.
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
676
1
'''simple docstring'''

from dataclasses import dataclass
from typing import Optional, Tuple

import torch
from torch import nn

from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput


@dataclass
class TransformationModelOutput(ModelOutput):
    """Text-model output carrying the projected embedding plus the base outputs.

    Fix: all four fields were annotated under the single name `lowercase` (so the
    dataclass kept only one field), while the model below constructs this class
    with `projection_state=`, `last_hidden_state=`, etc.; real field names and
    the class name the model references are restored.
    """

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    """XLM-Roberta config extended with projection/pooling options.

    Named `RobertaSeriesConfig` because the model class below assigns it to
    `config_class` by that exact name.
    """

    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class __UpperCamelCase(RobertaPreTrainedModel):
    """XLM-Roberta encoder followed by a linear projection to `config.project_dim`.

    Optionally applies the projection to the pre-LayerNorm'd second-to-last hidden
    state when `config.has_pre_transformation` is set.
    """

    _keys_to_ignore_on_load_unexpected = [r'pooler', r'logit_scale']
    _keys_to_ignore_on_load_missing = [r'position_ids', r'predictions.decoder.bias']
    base_model_prefix = 'roberta'
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, '''has_pre_transformation''', False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Encode and project; returns a TransformationModelOutput."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            # the pre-transformation path needs all hidden states to reach [-2]
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs['''hidden_states'''][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
676
'''simple docstring'''

import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class __UpperCamelCase(ProcessorMixin):
    """Bundles a CLIP image processor and an XLM-Roberta tokenizer into one processor.

    Fix: the base class was the undefined `lowerCamelCase__` (the file imports
    `ProcessorMixin`), the three class attributes collided on `lowercase`
    (ProcessorMixin needs `attributes`, `image_processor_class`,
    `tokenizer_class`), and `__call__`'s parameters were duplicate
    `lowerCAmelCase` names; the body's references determine the real names.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('''feature_extractor''')

        # accept the deprecated kwarg as a fallback for image_processor
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; merge pixel values into the encoding."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, order-preserving."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
676
1
'''simple docstring'''

import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score

import datasets


# Fix: these three module constants were all assigned to `a_`, but the metric
# class below references `_CITATION`, `_DESCRIPTION`, `_KWARGS_DESCRIPTION`.
_CITATION = """\
@inproceedings{kakwani2020indicnlpsuite,
    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
    year={2020},
    booktitle={Findings of EMNLP},
}
"""

_DESCRIPTION = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""

_KWARGS_DESCRIPTION = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
    predictions: list of predictions to score (as int64),
        except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
    references: list of ground truth labels corresponding to the predictions (as int64),
        except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "precision": Precision@10
Examples:

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli')  # 'wnli' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'precision@10': 1.0}

"""


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to labels (numpy arrays or array-likes)."""
    return float((preds == labels).mean())


def acc_and_fa(preds, labels):
    """Accuracy plus F1 for tasks scored on both."""
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def precision_at_aa(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval via cosine distance.

    Sentence i in `en_sentvecs` counts as a match when its true counterpart
    (index i in `in_sentvecs`) appears among its 10 nearest neighbours.
    """
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)

    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, '''cosine''')
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __UpperCamelCase(datasets.Metric):
    """IndicGLUE metric.

    Fix: both methods were named `lowercase__`, but `datasets.Metric` dispatches
    to `_info` and `_compute`, and the helpers above were all named `a_` while
    `_compute` calls them by their real names.
    """

    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
                '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
                '''"wiki-ner"]'''
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''int64''')
                    if self.config_name != '''cvit-mkb-clsr'''
                    else datasets.Sequence(datasets.Value('''float32''')),
                    '''references''': datasets.Value('''int64''')
                    if self.config_name != '''cvit-mkb-clsr'''
                    else datasets.Sequence(datasets.Value('''float32''')),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
                '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
                '''"wiki-ner"]'''
            )
"""Image-to-text pipeline: generates a caption for an input image, optionally
conditioned on a text prompt (model-type dependent)."""
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Predict a text caption for a given image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Image loading requires the vision backend (PIL).
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        """Split user kwargs into (preprocess, forward, postprocess) params."""
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            # Refuse ambiguous configuration instead of silently picking one.
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        """Caption the image(s) passed as input (single image or a batch)."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        """Load the image and build model inputs; prompt handling depends on
        the underlying model type (git / pix2struct / encoder-decoder)."""
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                # GIT expects the CLS token prepended manually, without other special tokens.
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})

            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)

            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)

            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")

        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # In batch mode, GIT inputs with no prompt come through as a list of
        # `None` input_ids; normalize that back to a single `None`.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        """Decode each generated sequence into a `generated_text` record."""
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
676
1
"""SageMaker-specific TrainingArguments (deprecated; use TrainingArguments)."""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field

import torch

from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging

logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    """Return True when SageMaker model parallelism is configured and usable."""
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None


if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp

    smp.init()


@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    # Extra argument filled in by the SageMaker launcher; unused here.
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        """Choose the torch device for this process and set `self._n_gpu`."""
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        # Under model parallelism, the effective data-parallel world size
        # comes from smp; otherwise defer to the base implementation.
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        # smp handles device placement itself.
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
676
"""Convert a TensorFlow 1.x BERT checkpoint to a PyTorch checkpoint."""
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Build a BertForPreTraining from `bert_config_file`, load the TF weights
    from `tf_checkpoint_path`, and save the state dict to `pytorch_dump_path`."""
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
676
1
"""`torch.distributed` diagnostics: verify every GPU in the cluster can talk
to the others via NCCL and can allocate GPU memory."""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes        # name
# #SBATCH --nodes=2                    # nodes
# #SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10           # number of cores per tasks
# #SBATCH --gres=gpu:4                 # number of gpus
# #SBATCH --time 0:05:00               # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out           # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
#  --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
#  --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
#  torch-distributed-gpu-test.py'
#

import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs):
    """Print under an exclusive file lock so output from concurrent
    processes does not interleave."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            # Always release the lock, even if printing raised.
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
676
"""AltCLIP model configuration classes."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    """Configuration of the AltCLIP text encoder (XLM-R style)."""

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        # Dimension of the projection used to match the vision tower.
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration of the AltCLIP vision encoder (CLIP ViT style)."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the vision config, extracting it from a composite AltCLIP config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    """Composite configuration holding an AltCLIP text and vision config."""

    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        """Instantiate a composite config from separate text and vision configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
1
"""Validate Sri Lankan mobile phone numbers."""
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if `phone` is a valid Sri Lankan mobile number.

    Accepted prefixes: 0, 94, +94 or 0094, followed by 7X (X in 0-8 except 3),
    an optional single space or dash, and exactly seven digits.
    """
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"
        r"7(0|1|2|4|5|6|7|8)"
        r"(-| |)"
        r"\d{7}$"
    )

    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"

    print(is_sri_lankan_phone_number(phone))
676
"""Tests for the PyTorch Flaubert model family."""
import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class FlaubertModelTester(object):
    """Builds tiny Flaubert configs/inputs and runs per-head shape checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        # Initialise so the return below is well-defined even when
        # `use_labels` is False.
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
676
1
'''simple docstring''' import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class __UpperCamelCase : def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=16, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase=None, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =seq_length lowerCamelCase_ =is_training lowerCamelCase_ =use_input_mask lowerCamelCase_ =use_token_type_ids lowerCamelCase_ =use_labels lowerCamelCase_ =vocab_size lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_act lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =attention_probs_dropout_prob lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =type_vocab_size lowerCamelCase_ =type_sequence_label_size lowerCamelCase_ =initializer_range lowerCamelCase_ =num_labels lowerCamelCase_ =num_choices lowerCamelCase_ =scope def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase_ =None if self.use_input_mask: 
lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ =None if self.use_token_type_ids: lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) lowerCamelCase_ =None lowerCamelCase_ =None lowerCamelCase_ =None if self.use_labels: lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size ) lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels ) lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices ) lowerCamelCase_ =self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self ): """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=lowerCAmelCase, initializer_range=self.initializer_range, use_stable_embedding=lowerCAmelCase, ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =OpenLlamaModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =True lowerCamelCase_ =OpenLlamaModel(lowerCAmelCase 
) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model( lowerCAmelCase, attention_mask=lowerCAmelCase, encoder_hidden_states=lowerCAmelCase, encoder_attention_mask=lowerCAmelCase, ) lowerCamelCase_ =model( lowerCAmelCase, attention_mask=lowerCAmelCase, encoder_hidden_states=lowerCAmelCase, ) lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =OpenLlamaForCausalLM(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =True lowerCamelCase_ =True lowerCamelCase_ =OpenLlamaForCausalLM(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() # first forward pass lowerCamelCase_ =model( lowerCAmelCase, attention_mask=lowerCAmelCase, encoder_hidden_states=lowerCAmelCase, encoder_attention_mask=lowerCAmelCase, use_cache=lowerCAmelCase, ) lowerCamelCase_ =outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCamelCase_ =ids_tensor((self.batch_size, 3), config.vocab_size ) lowerCamelCase_ =ids_tensor((self.batch_size, 3), vocab_size=2 ) # append to next input_ids and lowerCamelCase_ =torch.cat([input_ids, next_tokens], dim=-1 ) lowerCamelCase_ =torch.cat([input_mask, next_mask], dim=-1 ) lowerCamelCase_ =model( lowerCAmelCase, attention_mask=lowerCAmelCase, 
encoder_hidden_states=lowerCAmelCase, encoder_attention_mask=lowerCAmelCase, output_hidden_states=lowerCAmelCase, )['''hidden_states'''][0] lowerCamelCase_ =model( lowerCAmelCase, attention_mask=lowerCAmelCase, encoder_hidden_states=lowerCAmelCase, encoder_attention_mask=lowerCAmelCase, past_key_values=lowerCAmelCase, output_hidden_states=lowerCAmelCase, )['''hidden_states'''][0] # select random slice lowerCamelCase_ =ids_tensor((1,), output_from_past.shape[-1] ).item() lowerCamelCase_ =output_from_no_past[:, -3:, random_slice_idx].detach() lowerCamelCase_ =output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-3 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ) =config_and_inputs lowerCamelCase_ ={'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : Union[str, Any] =( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) lowercase : Tuple =(OpenLlamaForCausalLM,) if is_torch_available() else () lowercase : Optional[int] =( { 'feature-extraction': OpenLlamaModel, 'text-classification': OpenLlamaForSequenceClassification, 'text-generation': OpenLlamaForCausalLM, 'zero-shot': OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) lowercase : List[Any] =False lowercase : Tuple =False def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =OpenLlamaModelTester(self ) lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, 
hidden_size=37 ) def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase_ =type self.model_tester.create_and_check_model(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ =3 lowerCamelCase_ =input_dict['''input_ids'''] lowerCamelCase_ =input_ids.ne(1 ).to(lowerCAmelCase ) lowerCamelCase_ =ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size ) lowerCamelCase_ =OpenLlamaForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase ) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ =3 lowerCamelCase_ ='''single_label_classification''' lowerCamelCase_ =input_dict['''input_ids'''] lowerCamelCase_ =input_ids.ne(1 ).to(lowerCAmelCase ) lowerCamelCase_ =ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size ) lowerCamelCase_ =OpenLlamaForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase ) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ 
=self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ =3 lowerCamelCase_ ='''multi_label_classification''' lowerCamelCase_ =input_dict['''input_ids'''] lowerCamelCase_ =input_ids.ne(1 ).to(lowerCAmelCase ) lowerCamelCase_ =ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCamelCase_ =OpenLlamaForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase ) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' ) def lowercase__ ( self ): """simple docstring""" pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ =ids_tensor([1, 10], config.vocab_size ) lowerCamelCase_ =ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCamelCase_ =OpenLlamaModel(lowerCAmelCase ) original_model.to(lowerCAmelCase ) original_model.eval() lowerCamelCase_ =original_model(lowerCAmelCase ).last_hidden_state lowerCamelCase_ =original_model(lowerCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights lowerCamelCase_ ={'''type''': scaling_type, '''factor''': 1_0.0} lowerCamelCase_ =OpenLlamaModel(lowerCAmelCase ) scaled_model.to(lowerCAmelCase ) scaled_model.eval() lowerCamelCase_ =scaled_model(lowerCAmelCase ).last_hidden_state lowerCamelCase_ =scaled_model(lowerCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original 
# maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-5 ) ) else: self.assertFalse(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-5 ) )
676
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging a_ : List[Any] = logging.get_logger(__name__) def a_ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : int=False ) -> List[str]: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: lowerCamelCase_ =os.path.abspath(__snake_case ) logger.info(F'''Loading PyTorch weights from {pt_path}''' ) lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' ) logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' ) lowerCamelCase_ =convert_pytorch_state_dict_to_flax(__snake_case , __snake_case ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files lowerCamelCase_ =convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case ) return flax_state_dict def a_ ( __snake_case : Tuple[str] , __snake_case : np.ndarray , __snake_case : Dict[str, jnp.ndarray] , __snake_case : str , ) -> (Tuple[str], np.ndarray): """simple docstring""" def is_key_or_prefix_key_in_dict(__snake_case : Tuple[str] ) -> bool: return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0 # layer norm lowerCamelCase_ =pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean lowerCamelCase_ =pt_tuple_key[:-1] + ('''mean''',) if 
pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var lowerCamelCase_ =pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # embedding lowerCamelCase_ =pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ): lowerCamelCase_ =pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ): lowerCamelCase_ =pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase_ =pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase_ =pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 lowerCamelCase_ =None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): lowerCamelCase_ =pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): lowerCamelCase_ =pt_tuple_key[-2] + '''_v''' if name is not None: lowerCamelCase_ =pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def a_ ( __snake_case : Union[str, Any] , __snake_case : str ) -> str: """simple docstring""" # convert pytorch tensor to numpy lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase_ 
=flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: lowerCamelCase_ =flax_model.params['''params'''] else: lowerCamelCase_ =flax_model.params lowerCamelCase_ =flatten_dict(__snake_case ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase_ =flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(__snake_case ) lowerCamelCase_ ={} lowerCamelCase_ =(model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowerCamelCase_ =(model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase_ =tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowerCamelCase_ =pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ =pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor( __snake_case , __snake_case , __snake_case , __snake_case ) # add model prefix if necessary lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__snake_case , __snake_case ) continue # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) else: # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) return unflatten_dict(__snake_case ) def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Optional[Any]: """simple docstring""" import torch # Load the index lowerCamelCase_ ={} for shard_file in shard_filenames: # load using msgpack utils lowerCamelCase_ =torch.load(__snake_case ) lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase_ =flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase_ =flax_model.params['''params'''] lowerCamelCase_ =flatten_dict(__snake_case ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: lowerCamelCase_ =flax_model.params lowerCamelCase_ =flatten_dict(__snake_case ) lowerCamelCase_ =(model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowerCamelCase_ =(model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase_ =tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowerCamelCase_ =pt_tuple_key[0] == model_prefix if 
load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ =pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor( __snake_case , __snake_case , __snake_case , __snake_case ) # add model prefix if necessary lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue if "var" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__snake_case , __snake_case ) continue # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) else: # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) return unflatten_dict(__snake_case ) def a_ ( __snake_case : List[str] , __snake_case : Dict ) -> str: """simple docstring""" lowerCamelCase_ =os.path.abspath(__snake_case ) logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' ) # import correct flax class lowerCamelCase_ =getattr(__snake_case , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(__snake_case , '''rb''' ) as state_f: try: lowerCamelCase_ =from_bytes(__snake_case , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. 
''' ) return load_flax_weights_in_pytorch_model(__snake_case , __snake_case ) def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[int]: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights lowerCamelCase_ =flatten_dict(jax.tree_util.tree_map(lambda __snake_case : x.dtype == jnp.bfloataa , __snake_case ) ).values() if any(__snake_case ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) lowerCamelCase_ =jax.tree_util.tree_map( lambda __snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __snake_case ) lowerCamelCase_ =flatten_dict(__snake_case ) lowerCamelCase_ =pt_model.state_dict() lowerCamelCase_ =(pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) lowerCamelCase_ =(pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys lowerCamelCase_ =[] lowerCamelCase_ =set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCamelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix lowerCamelCase_ ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ 
=flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict: # conv layer lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) lowerCamelCase_ =jnp.transpose(__snake_case , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict: # linear layer lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) lowerCamelCase_ =flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: lowerCamelCase_ ='''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: lowerCamelCase_ ='''.'''.join(__snake_case ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. 
lowerCamelCase_ ={} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: lowerCamelCase_ =key.split('''.''' ) lowerCamelCase_ =None if key_components[-3::2] == ["parametrizations", "original0"]: lowerCamelCase_ =key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: lowerCamelCase_ =key_components[-2] + '''_v''' if name is not None: lowerCamelCase_ =key_components[:-3] + [name] lowerCamelCase_ ='''.'''.join(__snake_case ) lowerCamelCase_ =key if flax_key in special_pt_names: lowerCamelCase_ =special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ''' F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict lowerCamelCase_ =np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor lowerCamelCase_ =torch.from_numpy(__snake_case ) # remove from missing keys missing_keys.remove(__snake_case ) else: # weight is not expected by PyTorch model unexpected_keys.append(__snake_case ) pt_model.load_state_dict(__snake_case ) # re-transform missing_keys to list lowerCamelCase_ =list(__snake_case ) if len(__snake_case ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' ''' to be exactly identical (e.g. 
initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' ) if len(__snake_case ) > 0: logger.warning( F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' ''' use it for predictions and inference.''' ) else: logger.warning( F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n''' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' ) return pt_model
676
1
"""Packaged `datasets` builder that reads pickled pandas DataFrames into Arrow tables."""
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas.

    features: optional explicit schema; when None, the schema is inferred.
    """

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    # NOTE(review): identifiers in the original were machine-obfuscated; class,
    # method and local names restored from the surviving references
    # (``PandasConfig``, ``dl_manager``, ``data_files``, ``splits``, ``pa_table``).
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        """Dataset metadata: only the (optional) feature schema is declared."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle str, list and dict forms of ``data_files`` and build one
        SplitGenerator per split (a bare str/list maps to a single TRAIN split)."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast a table to the user-provided schema, if any."""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (index, Arrow table) pairs, one per pickled DataFrame file."""
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
676
'''simple docstring''' def a_ ( __snake_case : str , __snake_case : str ) -> str: """simple docstring""" lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =( first_str_length if first_str_length > second_str_length else second_str_length ) lowerCamelCase_ =[] for char_count in range(__snake_case ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(__snake_case ) if __name__ == "__main__": print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
676
1
"""Highest Response Ratio Next (HRRN) scheduling.

A non-preemptive discipline that, at each decision point, runs the arrived
process with the highest response ratio (waiting + burst) / burst.
"""
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Return the turn-around time of every process under HRRN scheduling.

    Note: ``arrival_time`` is sorted in place, and results are returned in
    arrival order.
    """
    current_time = 0
    finished_process_count = 0  # number of processes that have completed
    # finished_process[i] == 1 once process i has run to completion
    finished_process = [0] * no_of_process
    turn_around_time = [0] * no_of_process  # result buffer

    # Sort the three parallel lists by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Find the first unfinished process; fast-forward the clock if the
        # CPU would otherwise sit idle before it arrives.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        loc = 0  # index of the process selected to run next
        response_ratio = 0  # best response ratio seen so far
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i

        # Run the chosen process to completion (non-preemptive).
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        finished_process[loc] = 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Waiting time = turn-around time - burst time, per process."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )
    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
676
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ : Any = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[Any] = ["""TimmBackbone"""] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
676
1
'''simple docstring''' def a_ ( __snake_case : str , __snake_case : str ) -> str: """simple docstring""" lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =( first_str_length if first_str_length > second_str_length else second_str_length ) lowerCamelCase_ =[] for char_count in range(__snake_case ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(__snake_case ) if __name__ == "__main__": print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
676
'''simple docstring''' import functools def a_ ( __snake_case : str , __snake_case : str ) -> int: """simple docstring""" lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =len(__snake_case ) @functools.cache def min_distance(__snake_case : int , __snake_case : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa lowerCamelCase_ =int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , __snake_case ) , 1 + min_distance(__snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
676
1
"""MVP model configuration (RUCAIBox/mvp)."""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Map from model id to the location of its hosted config file.
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    """Configuration class for MVP encoder-decoder models.

    NOTE(review): class/attribute/parameter names were machine-obfuscated in
    the original; restored from the surviving attribute references — confirm
    against upstream.
    """

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Generic attribute names mapped onto MVP-specific config fields.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_267,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Legacy flag support: map the old force_bos_token_to_be_generated
        # kwarg onto forced_bos_token_id with a deprecation warning.
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
676
'''simple docstring''' def a_ ( __snake_case : int ) -> bool: """simple docstring""" if not isinstance(__snake_case , __snake_case ): lowerCamelCase_ =F'''Input value of [number={number}] must be an integer''' raise TypeError(__snake_case ) if number < 0: return False lowerCamelCase_ =number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
676
1
"""Diffusers community pipeline: transcribe speech with Whisper, then feed the
transcript to a Stable Diffusion text-to-image model.

NOTE(review): all identifiers were machine-obfuscated in the original; names
restored from the diffusers community speech-to-image pipeline layout —
confirm against upstream.
"""
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    WhisperForConditionalGeneration,
    WhisperProcessor,
)

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Compute attention in ``slice_size`` chunks to save memory."""
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Restore full (un-sliced) attention computation."""
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # 1. Transcribe the audio into a text prompt with Whisper.
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        # 2. Validate inputs.
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
676
"""Backtracking Sudoku solver for 9x9 grids (0 marks an empty cell)."""
from __future__ import annotations

# Type alias: a 9x9 grid of ints.
Matrix = list[list[int]]

# assigning initial values to the grid (a solvable example)
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution (contradictory givens)
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` may be placed at (row, column) without
    clashing with its row, column, or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the (row, column) of the first empty cell, or None if full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # undo and try the next digit

    return None


def print_solution(grid: Matrix) -> None:
    """Pretty-print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
676
1
'''simple docstring''' from __future__ import annotations class __UpperCamelCase : def __init__( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =data lowerCamelCase_ =None lowerCamelCase_ =None def a_ ( __snake_case : Node | None ) -> None: # In Order traversal of the tree """simple docstring""" if tree: display(tree.left ) print(tree.data ) display(tree.right ) def a_ ( __snake_case : Node | None ) -> int: """simple docstring""" return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def a_ ( __snake_case : Node ) -> bool: """simple docstring""" if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def a_ ( ) -> None: # Main function for testing. """simple docstring""" lowerCamelCase_ =Node(1 ) lowerCamelCase_ =Node(2 ) lowerCamelCase_ =Node(3 ) lowerCamelCase_ =Node(4 ) lowerCamelCase_ =Node(5 ) lowerCamelCase_ =Node(6 ) lowerCamelCase_ =Node(7 ) lowerCamelCase_ =Node(8 ) lowerCamelCase_ =Node(9 ) print(is_full_binary_tree(__snake_case ) ) print(depth_of_tree(__snake_case ) ) print('''Tree is: ''' ) display(__snake_case ) if __name__ == "__main__": main()
676
"""Informer time-series transformer configuration."""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """Configuration for Informer encoder-decoder time-series models.

    NOTE(review): class/attribute/parameter names were machine-obfuscated in
    the original; restored from the surviving attribute references — confirm
    against upstream.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer-specific attention settings
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
676
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class __UpperCamelCase ( metaclass=lowerCamelCase__ ): lowercase : List[str] =['speech'] def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" requires_backends(self, ['''speech'''] ) class __UpperCamelCase ( metaclass=lowerCamelCase__ ): lowercase : List[str] =['speech'] def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" requires_backends(self, ['''speech'''] )
676
'''simple docstring''' from __future__ import annotations def a_ ( __snake_case : int ) -> list[int]: """simple docstring""" lowerCamelCase_ =[True] * limit lowerCamelCase_ =False lowerCamelCase_ =False lowerCamelCase_ =True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): lowerCamelCase_ =i * 2 while index < limit: lowerCamelCase_ =False lowerCamelCase_ =index + i lowerCamelCase_ =[2] for i in range(3 , __snake_case , 2 ): if is_prime[i]: primes.append(__snake_case ) return primes def a_ ( __snake_case : int = 100_0000 ) -> int: """simple docstring""" lowerCamelCase_ =prime_sieve(__snake_case ) lowerCamelCase_ =0 lowerCamelCase_ =0 for i in range(len(__snake_case ) ): for j in range(i + length , len(__snake_case ) ): lowerCamelCase_ =sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: lowerCamelCase_ =j - i lowerCamelCase_ =sol return largest if __name__ == "__main__": print(F"""{solution() = }""")
676
1
'''simple docstring''' from unittest.mock import Mock, patch from file_transfer.send_file import send_file @patch('''socket.socket''' ) @patch('''builtins.open''' ) def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple ) -> Optional[Any]: """simple docstring""" # ===== initialization ===== lowerCamelCase_ =Mock() lowerCamelCase_ =conn, Mock() lowerCamelCase_ =iter([1, None] ) lowerCamelCase_ =lambda __snake_case : next(__snake_case ) # ===== invoke ===== send_file(filename='''mytext.txt''' , testing=__snake_case ) # ===== ensurance ===== sock.assert_called_once() sock.return_value.bind.assert_called_once() sock.return_value.listen.assert_called_once() sock.return_value.accept.assert_called_once() conn.recv.assert_called_once() file.return_value.__enter__.assert_called_once() file.return_value.__enter__.return_value.read.assert_called() conn.send.assert_called_once() conn.close.assert_called_once() sock.return_value.shutdown.assert_called_once() sock.return_value.close.assert_called_once()
676
"""Unconditional image-generation pipeline using DDIM sampling."""
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with a UNet denoiser and a
    DDIM scheduler."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate ``batch_size`` images; returns an ImagePipelineOutput
        (or a plain tuple when ``return_dict=False``)."""
        # Sample gaussian noise to begin the loop.
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
676
1
'''simple docstring''' from datetime import datetime import requests def a_ ( __snake_case : str ) -> bytes: """simple docstring""" lowerCamelCase_ ='''https://downloadgram.net/wp-json/wppress/video-downloader/video?url=''' lowerCamelCase_ =requests.get(base_url + url ).json()[0]['''urls'''][0]['''src'''] return requests.get(__snake_case ).content if __name__ == "__main__": a_ : int = input("""Enter Video/IGTV url: """).strip() a_ : Dict = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4""" with open(file_name, """wb""") as fp: fp.write(download_video(url)) print(F"""Done. Video saved to disk as {file_name}.""")
676
'''simple docstring''' from maths.prime_check import is_prime def a_ ( __snake_case : int ) -> int: """simple docstring""" if not isinstance(__snake_case , __snake_case ): lowerCamelCase_ =F'''Input value of [number={number}] must be an integer''' raise TypeError(__snake_case ) if is_prime(__snake_case ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
676
1
'''simple docstring'''
import sys
from collections import defaultdict


# Min-heap keyed by tentative edge weight, used by Prim's algorithm below.
# NOTE(review): this file has been machine-obfuscated — method names collapsed
# to `lowercase__` (later defs shadow earlier ones), parameter names to
# duplicated `lowerCAmelCase` (a SyntaxError in Python), and assignment targets
# to `lowerCamelCase_`, so the bodies reference names (vertex, heap, positions,
# temp, ...) that the signatures no longer bind.  The comments below describe
# the apparent original intent; confirm against the upstream source.
class __UpperCamelCase :
    def __init__( self ):
        """simple docstring"""
        # presumably self.node_position = [] (heap-index bookkeeping) — TODO confirm
        lowerCamelCase_ =[]

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # get_position: current heap index of `vertex`.
        return self.node_position[vertex]

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        # set_position: record heap index `pos` for a vertex.
        lowerCamelCase_ =pos

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        # top_to_bottom (sift-down): restore heap order from `start` downward.
        if start > size // 2 - 1:
            return
        else:
            # pick the smaller of the two children (or the only child)
            if 2 * start + 2 >= size:
                lowerCamelCase_ =2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    lowerCamelCase_ =2 * start + 1
                else:
                    lowerCamelCase_ =2 * start + 2
            if heap[smallest_child] < heap[start]:
                # swap heap entries together with their recorded positions,
                # then recurse into the swapped child
                lowerCamelCase_, lowerCamelCase_ =heap[smallest_child], positions[smallest_child]
                lowerCamelCase_, lowerCamelCase_ =(
                    heap[start],
                    positions[start],
                )
                lowerCamelCase_, lowerCamelCase_ =temp, tempa
                lowerCamelCase_ =self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start] ) )
                self.set_position(positions[start], lowerCAmelCase )
                self.top_to_bottom(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        # bottom_to_top (sift-up): bubble the value at `index` toward the root.
        lowerCamelCase_ =position[index]
        while index != 0:
            # parent index for a 0-based binary heap
            lowerCamelCase_ =int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                # pull the parent down one level
                lowerCamelCase_ =heap[parent]
                lowerCamelCase_ =position[parent]
                self.set_position(position[parent], lowerCAmelCase )
            else:
                # found the slot: drop the value here
                lowerCamelCase_ =val
                lowerCamelCase_ =temp
                self.set_position(lowerCAmelCase, lowerCAmelCase )
                break
            lowerCamelCase_ =parent
        else:
            # reached the root without breaking: value becomes the new root
            lowerCamelCase_ =val
            lowerCamelCase_ =temp
            self.set_position(lowerCAmelCase, 0 )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        # heapify: sift-down every internal node, last parent first.
        lowerCamelCase_ =len(lowerCAmelCase ) // 2 - 1
        for i in range(lowerCAmelCase, -1, -1 ):
            self.top_to_bottom(lowerCAmelCase, lowerCAmelCase, len(lowerCAmelCase ), lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        # delete_minimum: pop the root, replace it with sys.maxsize, re-sift,
        # and return the removed minimum.
        lowerCamelCase_ =positions[0]
        lowerCamelCase_ =sys.maxsize
        self.top_to_bottom(lowerCAmelCase, 0, len(lowerCAmelCase ), lowerCAmelCase )
        return temp


def a_ ( __snake_case : Tuple ) -> Any:
    """simple docstring"""
    # Prim's MST over an adjacency list {u: [[v, weight], ...]}.
    # Returns the chosen tree edges as (parent, vertex) pairs.
    lowerCamelCase_ =Heap()  # NOTE(review): `Heap` is the (renamed) class above — currently unresolved
    lowerCamelCase_ =[0] * len(__snake_case )
    lowerCamelCase_ =[-1] * len(__snake_case )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    lowerCamelCase_ =[]  # Heap of Distance of vertices from their neighboring vertex
    lowerCamelCase_ =[]
    for vertex in range(len(__snake_case ) ):
        distance_tv.append(sys.maxsize )
        positions.append(__snake_case )
        heap.node_position.append(__snake_case )
    lowerCamelCase_ =[]
    lowerCamelCase_ =1
    lowerCamelCase_ =sys.maxsize
    # seed the frontier with vertex 0's neighbours
    for neighbor, distance in adjacency_list[0]:
        lowerCamelCase_ =0
        lowerCamelCase_ =distance
    heap.heapify(__snake_case , __snake_case )
    for _ in range(1 , len(__snake_case ) ):
        lowerCamelCase_ =heap.delete_minimum(__snake_case , __snake_case )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            lowerCamelCase_ =1
            # relax edges out of the newly added vertex
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(__snake_case )]
                ):
                    lowerCamelCase_ =distance
                    heap.bottom_to_top(
                        __snake_case , heap.get_position(__snake_case ) , __snake_case , __snake_case )
                    lowerCamelCase_ =vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # Read an undirected weighted edge list from stdin and print the MST edges.
    a_ : List[Any] = int(input("""Enter number of edges: """).strip())
    a_ : List[Any] = defaultdict(list)
    for _ in range(edges_number):
        a_ : Any = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
676
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


# NOTE(review): machine obfuscation collapsed class/field/method/parameter
# names (`__UpperCamelCase`, `lowercase`, `lowercase__`, duplicated
# `lowerCAmelCase` parameters — a SyntaxError) and assignment targets
# (`lowerCamelCase_`), so method bodies reference names the signatures no
# longer bind.  Comments describe the apparent intent (a variance-exploding
# SDE scheduler); confirm every detail against the upstream source.
@dataclass
class __UpperCamelCase ( lowerCamelCase__ ):
    # Scheduler step output — presumably `prev_sample` and `prev_sample_mean`
    # (see the return in step_pred below) — TODO confirm
    lowercase : torch.FloatTensor
    lowercase : torch.FloatTensor


class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    # presumably `order = 1` (first-order solver) — TODO confirm
    lowercase : Tuple =1

    @register_to_config
    def __init__( self, lowerCAmelCase = 2_000, lowerCAmelCase = 0.1_5, lowerCAmelCase = 0.0_1, lowerCAmelCase = 1_3_4_8.0, lowerCAmelCase = 1e-5, lowerCAmelCase = 1, ):
        """simple docstring"""
        # Parameters look like (num_train_timesteps, snr, sigma_min, sigma_max,
        # sampling_eps, correct_steps) given the defaults and the body — TODO confirm.
        lowerCamelCase_ =sigma_max
        # setable values
        lowerCamelCase_ =None
        self.set_sigmas(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """simple docstring"""
        # scale_model_input: identity for this scheduler.
        return sample

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None ):
        """simple docstring"""
        # set_timesteps: continuous time grid from 1 down to sampling_eps.
        lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps
        lowerCamelCase_ =torch.linspace(1, lowerCAmelCase, lowerCAmelCase, device=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None ):
        """simple docstring"""
        # set_sigmas: geometric (log-linear) sigma schedule matching the timesteps.
        lowerCamelCase_ =sigma_min if sigma_min is not None else self.config.sigma_min
        lowerCamelCase_ =sigma_max if sigma_max is not None else self.config.sigma_max
        lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(lowerCAmelCase, lowerCAmelCase )
        # continuous sigmas plus a discrete log-spaced table used for indexing
        lowerCamelCase_ =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        lowerCamelCase_ =torch.exp(torch.linspace(math.log(lowerCAmelCase ), math.log(lowerCAmelCase ), lowerCAmelCase ) )
        lowerCamelCase_ =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        # get_adjacent_sigma: sigma of the previous discrete step (zero at t == 0).
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device ) ),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device ),
        )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ):
        """simple docstring"""
        # step_pred: one reverse-SDE predictor step (eqs. 6 and 47 of the paper).
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        lowerCamelCase_ =timestep * torch.ones(
            sample.shape[0], device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        lowerCamelCase_ =(timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        lowerCamelCase_ =timesteps.to(self.discrete_sigmas.device )
        lowerCamelCase_ =self.discrete_sigmas[timesteps].to(sample.device )
        lowerCamelCase_ =self.get_adjacent_sigma(lowerCAmelCase, lowerCAmelCase ).to(sample.device )
        lowerCamelCase_ =torch.zeros_like(lowerCAmelCase )
        lowerCamelCase_ =(sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        lowerCamelCase_ =diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            lowerCamelCase_ =diffusion.unsqueeze(-1 )
        lowerCamelCase_ =drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        lowerCamelCase_ =randn_tensor(
            sample.shape, layout=sample.layout, generator=lowerCAmelCase, device=sample.device, dtype=sample.dtype )
        lowerCamelCase_ =sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        lowerCamelCase_ =prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=lowerCAmelCase, prev_sample_mean=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ):
        """simple docstring"""
        # step_correct: one Langevin corrector step; step size derived from the SNR.
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        lowerCamelCase_ =randn_tensor(sample.shape, layout=sample.layout, generator=lowerCAmelCase ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        lowerCamelCase_ =torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
        lowerCamelCase_ =torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
        lowerCamelCase_ =(self.config.snr * noise_norm / grad_norm) ** 2 * 2
        lowerCamelCase_ =step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        lowerCamelCase_ =step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            lowerCamelCase_ =step_size.unsqueeze(-1 )
        lowerCamelCase_ =sample + step_size * model_output
        lowerCamelCase_ =prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """simple docstring"""
        # add_noise: forward-process noising of clean samples at the given timesteps.
        lowerCamelCase_ =timesteps.to(original_samples.device )
        lowerCamelCase_ =self.discrete_sigmas.to(original_samples.device )[timesteps]
        lowerCamelCase_ =(
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
        )
        lowerCamelCase_ =noise + original_samples
        return noisy_samples

    def __len__( self ):
        """simple docstring"""
        # Number of training timesteps from the registered config.
        return self.config.num_train_timesteps
676
1
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

# Pretrained-config archive map; keeps the module's public `a_` binding.
a_ = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}


class __UpperCamelCase(PretrainedConfig):
    """Configuration class for the original OpenAI GPT model.

    Restored from the obfuscated original: the ``__init__`` signature had
    every parameter collapsed to the same name (a SyntaxError) and assigned
    to a local instead of ``self``; real names are recovered from the
    right-hand sides of the body's assignments.

    Args mirror the classic GPT hyper-parameters (vocab size, context
    length, embedding width, depth, heads, activations/dropouts and the
    sequence-summary options used by the double-head model).
    """

    # Identifies this config type to the AutoConfig machinery.
    model_type = "openai-gpt"
    # Maps the generic attribute names onto GPT's historical names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40_478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        # Forward token ids / framework flags to the base config.
        super().__init__(**kwargs)
676
'''simple docstring'''


def a_(number: int, iterations: int) -> str:
    """Play FizzBuzz from ``number`` up to and including ``iterations``.

    Multiples of 3 become "Fizz", multiples of 5 become "Buzz", multiples
    of both become "FizzBuzz"; every entry is followed by a space.

    Args:
        number: starting value (must be an int >= 1).
        iterations: last value to play (must be an int >= 1).

    Returns:
        The space-separated FizzBuzz transcript.

    Raises:
        ValueError: on non-integer or out-of-range arguments.

    Bug fix: the original signature reused one parameter name for both
    arguments (a SyntaxError) so the body's references to ``number`` and
    ``iterations`` were unbound; the isinstance checks were degenerate.
    """
    if not isinstance(iterations, int):
        raise ValueError('''iterations must be defined as integers''')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            '''starting number must be and integer and be more than 0''')
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''')

    out = ''''''
    while number <= iterations:
        # "Fizz" and "Buzz" concatenate naturally into "FizzBuzz" on x15.
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ : Any = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[Any] = ["""TimmBackbone"""] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
676
'''simple docstring''' from typing import List import numpy as np def a_ ( __snake_case : dict ) -> int: """simple docstring""" lowerCamelCase_ ={key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) lowerCamelCase_ =max(lists_lengths.values() , default=0 ) return max(1 , __snake_case ) def a_ ( __snake_case : int , __snake_case : int ) -> List[range]: """simple docstring""" lowerCamelCase_ =[] for group_idx in range(__snake_case ): lowerCamelCase_ =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowerCamelCase_ =shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowerCamelCase_ =range(__snake_case , start + num_shards_to_add ) shards_indices_per_group.append(__snake_case ) return shards_indices_per_group def a_ ( __snake_case : dict , __snake_case : int ) -> List[dict]: """simple docstring""" lowerCamelCase_ =_number_of_shards_in_gen_kwargs(__snake_case ) if num_shards == 1: return [dict(__snake_case )] else: lowerCamelCase_ =_distribute_shards(num_shards=__snake_case , max_num_jobs=__snake_case ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__snake_case , __snake_case ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__snake_case ) ) ] def a_ ( __snake_case : List[dict] ) -> dict: """simple docstring""" 
return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __snake_case ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def a_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> dict: """simple docstring""" lowerCamelCase_ ={len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )} lowerCamelCase_ ={} for size in list_sizes: lowerCamelCase_ =list(range(__snake_case ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowerCamelCase_ =dict(__snake_case ) for key, value in shuffled_kwargs.items(): if isinstance(__snake_case , __snake_case ): lowerCamelCase_ =[value[i] for i in indices_per_size[len(__snake_case )]] return shuffled_kwargs
676
1
'''simple docstring'''
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


# Multi-GPU integration tests: each test shells out to `torchrun` with one
# process per visible CUDA device.
# NOTE(review): machine obfuscation collapsed the method names to
# `lowercase__` (later defs shadow earlier ones) and assignment targets to
# `lowerCamelCase_`, so the bodies reference attributes/locals (mod_file,
# test_file_path, operation_file_path, data_loop_file_path, cmd) that are no
# longer bound — confirm against the upstream accelerate test before running.
class __UpperCamelCase ( unittest.TestCase ):
    def lowercase__ ( self ):
        """simple docstring"""
        # presumably setUp: locate the helper scripts shipped in
        # accelerate.test_utils/scripts — TODO confirm
        lowerCamelCase_ =inspect.getfile(accelerate.test_utils )
        lowerCamelCase_ =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
        lowerCamelCase_ =os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
        lowerCamelCase_ =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )

    @require_multi_gpu
    def lowercase__ ( self ):
        """simple docstring"""
        # run the generic test script across all GPUs
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        lowerCamelCase_ =['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(lowerCAmelCase, env=os.environ.copy() )

    @require_multi_gpu
    def lowercase__ ( self ):
        """simple docstring"""
        # run the distributed-ops script
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        lowerCamelCase_ =['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
        print(f'''Command: {cmd}''' )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(lowerCAmelCase, env=os.environ.copy() )

    @require_multi_gpu
    def lowercase__ ( self ):
        """simple docstring"""
        # re-run this very file under torchrun (exercises the __main__ block below)
        lowerCamelCase_ =['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(lowerCAmelCase, env=os.environ.copy() )

    @require_multi_gpu
    def lowercase__ ( self ):
        """simple docstring"""
        # run the data-loop script restricted to two devices
        print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
        lowerCamelCase_ =['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='''0,1''' ):
            execute_subprocess_async(lowerCAmelCase, env=os.environ.copy() )


if __name__ == "__main__":
    # Worker body executed under torchrun: validates pad_across_processes in
    # both directions (padding appended, and pad_first=True).
    # NOTE(review): obfuscation rebinds `a_` where distinct names were
    # intended (accelerator, shape, tensor, error_msg, tensora, index), so
    # the references below are unresolved — confirm against upstream.
    a_ : str = Accelerator()
    a_ : List[str] = (accelerator.state.process_index + 2, 10)
    a_ : Dict = torch.randint(0, 10, shape).to(accelerator.device)
    a_ : Union[str, Any] = """"""
    a_ : Optional[Any] = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    a_ : Dict = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    a_ : int = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
676
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer

logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
a_ : int = logging.getLogger(__name__)


def a_ ( ) -> Union[str, Any]:
    """simple docstring"""
    # Distillation pre-processing: tokenize a text dump once and pickle the
    # id arrays so training never re-tokenizes.
    # NOTE(review): obfuscation artifacts — `Union` is used without an
    # import, `GPTaTokenizer` was presumably GPT2Tokenizer, assignment
    # targets were collapsed to `lowerCamelCase_` (so `parser`, `args`,
    # `tokenizer`, `bos`, `sep`, `data`, `rslt`, ... are unbound), and
    # `main()` at the bottom no longer exists — confirm against the
    # upstream distillation script.
    lowerCamelCase_ =argparse.ArgumentParser(
        description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
    parser.add_argument('''--file_path''' , type=__snake_case , default='''data/dump.txt''' , help='''The path to the data.''' )
    parser.add_argument('''--tokenizer_type''' , type=__snake_case , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
    parser.add_argument('''--tokenizer_name''' , type=__snake_case , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
    parser.add_argument('''--dump_file''' , type=__snake_case , default='''data/dump''' , help='''The dump file prefix.''' )
    lowerCamelCase_ =parser.parse_args()
    logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' )
    # pick the tokenizer family and its sentence-boundary special tokens
    if args.tokenizer_type == "bert":
        lowerCamelCase_ =BertTokenizer.from_pretrained(args.tokenizer_name )
        lowerCamelCase_ =tokenizer.special_tokens_map['''cls_token''']  # `[CLS]`
        lowerCamelCase_ =tokenizer.special_tokens_map['''sep_token''']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        lowerCamelCase_ =RobertaTokenizer.from_pretrained(args.tokenizer_name )
        lowerCamelCase_ =tokenizer.special_tokens_map['''cls_token''']  # `<s>`
        lowerCamelCase_ =tokenizer.special_tokens_map['''sep_token''']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        lowerCamelCase_ =GPTaTokenizer.from_pretrained(args.tokenizer_name )
        lowerCamelCase_ =tokenizer.special_tokens_map['''bos_token''']  # `<|endoftext|>`
        lowerCamelCase_ =tokenizer.special_tokens_map['''eos_token''']  # `<|endoftext|>`
    logger.info(F'''Loading text from {args.file_path}''' )
    with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
        lowerCamelCase_ =fp.readlines()
    logger.info('''Start encoding''' )
    logger.info(F'''{len(__snake_case )} examples to process.''' )
    lowerCamelCase_ =[]
    lowerCamelCase_ =0
    lowerCamelCase_ =1_0000  # progress-log interval
    lowerCamelCase_ =time.time()
    for text in data:
        # wrap each line in the boundary tokens before encoding
        lowerCamelCase_ =F'''{bos} {text.strip()} {sep}'''
        lowerCamelCase_ =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
        rslt.append(__snake_case )
        iter += 1  # NOTE(review): shadows the `iter` builtin upstream too
        if iter % interval == 0:
            lowerCamelCase_ =time.time()
            logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
            lowerCamelCase_ =time.time()
    logger.info('''Finished binarization''' )
    logger.info(F'''{len(__snake_case )} examples processed.''' )
    lowerCamelCase_ =F'''{args.dump_file}.{args.tokenizer_name}.pickle'''
    lowerCamelCase_ =tokenizer.vocab_size
    # use 16-bit ids when the vocab fits, else 32-bit, to halve file size
    if vocab_size < (1 << 16):
        lowerCamelCase_ =[np.uintaa(__snake_case ) for d in rslt]
    else:
        lowerCamelCase_ =[np.intaa(__snake_case ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F'''Dump to {dp_file}''' )
    with open(__snake_case , '''wb''' ) as handle:
        pickle.dump(rslt_ , __snake_case , protocol=pickle.HIGHEST_PROTOCOL )


if __name__ == "__main__":
    main()
676
1
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor

a_ : Optional[Any] = logging.get_logger(__name__)


class __UpperCamelCase(VideoMAEImageProcessor):
    """Deprecated alias of ``VideoMAEImageProcessor``.

    Kept only for backward compatibility; instantiating it emits a
    deprecation warning and otherwise behaves exactly like the image
    processor. Restored from the obfuscated original, whose ``__init__``
    reused one name for ``*args`` and ``**kwargs`` (a SyntaxError) and lost
    the ``FutureWarning`` category and the base class.
    """

    def __init__(self, *args, **kwargs) -> None:
        # Warn on every construction so callers migrate to the new class.
        warnings.warn(
            '''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use VideoMAEImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
676
'''simple docstring'''
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

# Pretrained-config archive map; keeps the module's public `a_` binding.
a_ = {
    """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}


class __UpperCamelCase(PretrainedConfig):
    """Configuration class for the MVP encoder-decoder model.

    Restored from the obfuscated original: all ``__init__`` parameters were
    collapsed to one duplicated name (a SyntaxError) and assigned to a local
    instead of ``self``; real names are recovered from the right-hand sides
    of the body's assignments and the ``super().__init__`` keywords.
    """

    model_type = "mvp"
    # past_key_values are cache, not model output, at inference time.
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_267,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Legacy flag: translate force_bos_token_to_be_generated into the
        # modern forced_bos_token_id and tell the user to re-save.
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
                '''The config can simply be saved and uploaded again to be fixed.'''
            )
676
1
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool

if is_vision_available():
    from PIL import Image


class __UpperCamelCase(PipelineTool):
    """Agent tool producing a binary segmentation mask for a text label via CLIPSeg.

    Restored from the obfuscated original: the six class attributes were all
    named ``lowercase`` (shadowing each other), methods shared one name with
    duplicated parameters (a SyntaxError), and ``decode`` had lost its
    thresholding subscript assignments.
    """

    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation

    inputs = ['image', 'text']
    outputs = ['image']

    def __init__(self, *args, **kwargs):
        # PIL is required to build the output mask image.
        requires_backends(self, ['''vision'''])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        # Joint text+image preprocessing for CLIPSeg.
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors='''pt''')

    def forward(self, inputs):
        # Inference only — no gradients needed.
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # Threshold the logits into a {0, 1} mask, then scale to 8-bit.
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """bert_for_seq_generation""": (
            """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""bert_for_seq_generation""": 5_12}


class __UpperCamelCase(PreTrainedTokenizer):
    """SentencePiece tokenizer for BertGeneration checkpoints.

    Restored from the obfuscated original: module constants and class
    attributes were collapsed to shared names while the method bodies still
    referenced these real identifiers, and the ``__init__`` keyword
    parameters were all duplicated (a SyntaxError).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        # Size of the underlying SentencePiece model.
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (pieces) back to a single string."""
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # vocab file missing on disk: dump the in-memory model instead
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
676
1
'''simple docstring'''
from string import ascii_uppercase

# Letter <-> index tables for mod-26 arithmetic.  Restored from the
# obfuscated original, where both tables and all four functions had been
# collapsed onto single shared names while main() still called the real
# function names.
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dicta_rev = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Extend ``key`` by repeating it until it is as long as ``message``."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt ``message`` with the running key (spaces pass through).

    Each letter becomes (plain - key) mod 26; the key index advances only
    on letters, so spaces do not consume key material.
    """
    cipher_text = ''''''
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher_text += dicta_rev[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt text produced by :func:`cipher_text` with the same key."""
    or_txt = ''''''
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            # inverse of encryption: (cipher + key) mod 26
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dicta_rev[x]
    return or_txt


def main() -> None:
    """Demonstrate a round trip on a fixed message/key pair."""
    message = '''THE GERMAN ATTACK'''
    key = '''SECRET'''
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f'''Encrypted Text = {s}''')
    print(f'''Original Text = {original_text(s, key_new)}''')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
676
'''simple docstring'''
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate sum(c_i * x**i) directly from the coefficient sequence.

    ``poly[i]`` is the coefficient of ``x**i``.  O(n) multiplications per
    term (x**i recomputed each time); see :func:`horner` for the O(n) form.

    Restored from the obfuscated original, where both functions were named
    ``a_`` (the second shadowing the first) while the demo below called the
    real names.
    """
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's scheme (one multiply/add per coefficient)."""
    result = 0.0
    # Fold from the highest-order coefficient down: r = r*x + c.
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    a_ = (0.0, 0.0, 5.0, 9.3, 7.0)  # f(x) = 5x^2 + 9.3x^3 + 7x^4
    x = 10.0
    print(evaluate_poly(a_, x))
    print(horner(a_, x))
676
1
'''simple docstring'''
# Integration tests for the `datasets` FAISS / Elasticsearch search indexes.
#
# NOTE(review): identifiers in this file were machine-mangled -- every
# assignment target became `lowerCamelCase_` and most call arguments became
# `lowerCAmelCase` -- so names such as `dset`, `index`, `scores`, `query`,
# `examples` are read but never bound, and the duplicated lambda parameters
# below are a SyntaxError. Code is preserved unchanged; restore the real
# names from upstream history before running.
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss

# Marks every test in this module as an integration test.
a_ : int = pytest.mark.integration


@require_faiss
class __UpperCamelCase ( lowerCamelCase__ ):
    # Helper: builds a 30-row dataset with a single string column "filename".
    def lowercase__ ( self ):
        """simple docstring"""
        lowerCamelCase_ =Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(lowerCAmelCase ) for x in np.arange(30 ).tolist()]} )
        return dset

    # Adds a FAISS index over a mapped "vecs" column, then checks that the
    # nearest neighbour of an all-ones query is row 29 (largest vector).
    def lowercase__ ( self ):
        """simple docstring"""
        import faiss

        lowerCamelCase_ =self._create_dummy_dataset()
        lowerCamelCase_ =dset.map(
            lambda lowerCAmelCase, lowerCAmelCase : {"vecs": i * np.ones(5, dtype=np.floataa )}, with_indices=lowerCAmelCase, keep_in_memory=lowerCAmelCase )
        lowerCamelCase_ =dset.add_faiss_index('''vecs''', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT )
        lowerCamelCase_, lowerCamelCase_ =dset.get_nearest_examples('''vecs''', np.ones(5, dtype=np.floataa ) )
        self.assertEqual(examples['''filename'''][0], '''my_name-train_29''' )
        dset.drop_index('''vecs''' )

    # Same nearest-neighbour check, but the index is built from external arrays.
    def lowercase__ ( self ):
        """simple docstring"""
        import faiss

        lowerCamelCase_ =self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1, 1 ), index_name='''vecs''', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT, )
        lowerCamelCase_, lowerCamelCase_ =dset.get_nearest_examples('''vecs''', np.ones(5, dtype=np.floataa ) )
        self.assertEqual(examples['''filename'''][0], '''my_name-train_29''' )

    # Round-trips a FAISS index through save/load on a temp file.
    def lowercase__ ( self ):
        """simple docstring"""
        import faiss

        lowerCamelCase_ =self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1, 1 ), index_name='''vecs''', metric_type=faiss.METRIC_INNER_PRODUCT, )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=lowerCAmelCase ) as tmp_file:
            dset.save_faiss_index('''vecs''', tmp_file.name )
            dset.load_faiss_index('''vecs2''', tmp_file.name )
        os.unlink(tmp_file.name )
        lowerCamelCase_, lowerCamelCase_ =dset.get_nearest_examples('''vecs2''', np.ones(5, dtype=np.floataa ) )
        self.assertEqual(examples['''filename'''][0], '''my_name-train_29''' )

    # Querying a dropped index must raise (presumably MissingIndex -- the
    # expected exception argument was mangled away; verify against upstream).
    def lowercase__ ( self ):
        """simple docstring"""
        lowerCamelCase_ =self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1, 1 ), index_name='''vecs''' )
        dset.drop_index('''vecs''' )
        self.assertRaises(lowerCAmelCase, partial(dset.get_nearest_examples, '''vecs2''', np.ones(5, dtype=np.floataa ) ) )

    # Elasticsearch-backed index with the client fully mocked out.
    def lowercase__ ( self ):
        """simple docstring"""
        from elasticsearch import Elasticsearch

        lowerCamelCase_ =self._create_dummy_dataset()
        with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
            '''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
            lowerCamelCase_ ={'''acknowledged''': True}
            mocked_bulk.return_value([(True, None)] * 30 )
            lowerCamelCase_ ={'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
            lowerCamelCase_ =Elasticsearch()
            dset.add_elasticsearch_index('''filename''', es_client=lowerCAmelCase )
            lowerCamelCase_, lowerCamelCase_ =dset.get_nearest_examples('''filename''', '''my_name-train_29''' )
            self.assertEqual(examples['''filename'''][0], '''my_name-train_29''' )


@require_faiss
class __UpperCamelCase ( lowerCamelCase__ ):
    # Direct FaissIndex API: add vectors, single and batched search.
    def lowercase__ ( self ):
        """simple docstring"""
        import faiss

        lowerCamelCase_ =FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.floataa ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal, 5 )
        index.add_vectors(np.zeros((5, 5), dtype=np.floataa ) )
        self.assertEqual(index.faiss_index.ntotal, 10 )
        # single query
        lowerCamelCase_ =np.zeros(5, dtype=np.floataa )
        lowerCamelCase_ =1
        lowerCamelCase_, lowerCamelCase_ =index.search(lowerCAmelCase )
        self.assertRaises(lowerCAmelCase, index.search, query.reshape(-1, 1 ) )
        self.assertGreater(scores[0], 0 )
        self.assertEqual(indices[0], 1 )
        # batched queries
        lowerCamelCase_ =np.eye(5, dtype=np.floataa )[::-1]
        lowerCamelCase_, lowerCamelCase_ =index.search_batch(lowerCAmelCase )
        self.assertRaises(lowerCAmelCase, index.search_batch, queries[0] )
        lowerCamelCase_ =[scores[0] for scores in total_scores]
        lowerCamelCase_ =[indices[0] for indices in total_indices]
        self.assertGreater(np.min(lowerCAmelCase ), 0 )
        self.assertListEqual([4, 3, 2, 1, 0], lowerCAmelCase )

    # Index construction from a FAISS string factory; factory + custom index
    # together must raise.
    def lowercase__ ( self ):
        """simple docstring"""
        import faiss

        lowerCamelCase_ =FaissIndex(string_factory='''Flat''' )
        index.add_vectors(np.eye(5, dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat )
        lowerCamelCase_ =FaissIndex(string_factory='''LSH''' )
        index.add_vectors(np.eye(5, dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH )
        with self.assertRaises(lowerCAmelCase ):
            lowerCamelCase_ =FaissIndex(string_factory='''Flat''', custom_index=faiss.IndexFlat(5 ) )

    # Wrapping a user-supplied faiss.IndexFlat.
    def lowercase__ ( self ):
        """simple docstring"""
        import faiss

        lowerCamelCase_ =faiss.IndexFlat(5 )
        lowerCamelCase_ =FaissIndex(custom_index=lowerCAmelCase )
        index.add_vectors(np.eye(5, dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat )

    # Save/load round trip of a raw FaissIndex through a temp file.
    def lowercase__ ( self ):
        """simple docstring"""
        import faiss

        lowerCamelCase_ =FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5, dtype=np.floataa ) )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=lowerCAmelCase ) as tmp_file:
            index.save(tmp_file.name )
            lowerCamelCase_ =FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        lowerCamelCase_ =np.zeros(5, dtype=np.floataa )
        lowerCamelCase_ =1
        lowerCamelCase_, lowerCamelCase_ =index.search(lowerCAmelCase )
        self.assertGreater(scores[0], 0 )
        self.assertEqual(indices[0], 1 )


# Save/load round trip through a mock fsspec filesystem fixture.
# NOTE(review): `Optional`/`Union` are not imported in this file -- mangling
# added these annotations; the parameter was presumably `mockfs`.
@require_faiss
def a_ ( __snake_case : Optional[int] ) -> Union[str, Any]:
    """simple docstring"""
    import faiss

    lowerCamelCase_ =FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.floataa ) )

    lowerCamelCase_ ='''index.faiss'''
    lowerCamelCase_ =f'''mock://{index_name}'''
    index.save(__snake_case , storage_options=mockfs.storage_options )
    lowerCamelCase_ =FaissIndex.load(__snake_case , storage_options=mockfs.storage_options )

    lowerCamelCase_ =np.zeros(5 , dtype=np.floataa )
    lowerCamelCase_ =1
    lowerCamelCase_, lowerCamelCase_ =index.search(__snake_case )
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class __UpperCamelCase ( lowerCamelCase__ ):
    # ElasticSearchIndex API with a fully mocked client: single and batched
    # searches, each with and without a request timeout.
    def lowercase__ ( self ):
        """simple docstring"""
        from elasticsearch import Elasticsearch

        with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
            '''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
            lowerCamelCase_ =Elasticsearch()
            lowerCamelCase_ ={'''acknowledged''': True}
            lowerCamelCase_ =ElasticSearchIndex(es_client=lowerCAmelCase )
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(['''foo''', '''bar''', '''foobar'''] )
            # single query
            lowerCamelCase_ ='''foo'''
            lowerCamelCase_ ={'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
            lowerCamelCase_, lowerCamelCase_ =index.search(lowerCAmelCase )
            self.assertEqual(scores[0], 1 )
            self.assertEqual(indices[0], 0 )
            # single query with timeout
            lowerCamelCase_ ='''foo'''
            lowerCamelCase_ ={'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
            lowerCamelCase_, lowerCamelCase_ =index.search(lowerCAmelCase, request_timeout=30 )
            self.assertEqual(scores[0], 1 )
            self.assertEqual(indices[0], 0 )
            # batched queries
            lowerCamelCase_ =['''foo''', '''bar''', '''foobar''']
            lowerCamelCase_ ={'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
            lowerCamelCase_, lowerCamelCase_ =index.search_batch(lowerCAmelCase )
            lowerCamelCase_ =[scores[0] for scores in total_scores]
            lowerCamelCase_ =[indices[0] for indices in total_indices]
            self.assertGreater(np.min(lowerCAmelCase ), 0 )
            self.assertListEqual([1, 1, 1], lowerCAmelCase )
            # batched queries with timeout
            lowerCamelCase_ =['''foo''', '''bar''', '''foobar''']
            lowerCamelCase_ ={'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
            lowerCamelCase_, lowerCamelCase_ =index.search_batch(lowerCAmelCase, request_timeout=30 )
            lowerCamelCase_ =[scores[0] for scores in total_scores]
            lowerCamelCase_ =[indices[0] for indices in total_indices]
            self.assertGreater(np.min(lowerCAmelCase ), 0 )
            self.assertListEqual([1, 1, 1], lowerCAmelCase )
676
'''simple docstring'''
# Processor pairing a CLIP image processor with an XLM-Roberta tokenizer
# (AltCLIP-style).
#
# NOTE(review): identifiers were machine-mangled -- parameter names became
# `lowerCAmelCase` (duplicated, which is a SyntaxError) and assignment targets
# became `lowerCamelCase_` -- so names like `kwargs`, `image_processor`,
# `tokenizer`, `encoding`, `image_features` are read but never bound here.
# Code is preserved unchanged; restore real names from upstream history.
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class __UpperCamelCase ( lowerCamelCase__ ):
    # Processor wiring: stored attributes, image-processor class name, and the
    # accepted (slow, fast) tokenizer classes.
    lowercase : Optional[int] =['image_processor', 'tokenizer']
    lowercase : str ='CLIPImageProcessor'
    lowercase : Optional[Any] =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
        """Validate and store the image processor and tokenizer.

        Accepts the deprecated ``feature_extractor`` kwarg as a fallback for
        ``image_processor``, emitting a deprecation warning. Raises ValueError
        if either component is missing.
        """
        lowerCamelCase_ =None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', lowerCAmelCase, )
            lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
        lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(lowerCAmelCase, lowerCAmelCase )

    def __call__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
        """Tokenize `text` and/or preprocess `images`; at least one is required.

        NOTE(review): when both text and images are given, the assignment of
        ``image_features.pixel_values`` lost its target during mangling --
        presumably ``encoding["pixel_values"]``; confirm against upstream.
        """
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )
        if images is not None:
            lowerCamelCase_ =self.image_processor(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )
        if text is not None and images is not None:
            lowerCamelCase_ =image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**lowerCAmelCase ), tensor_type=lowerCAmelCase )

    # Thin delegate to the tokenizer's batch_decode.
    def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )

    # Thin delegate to the tokenizer's decode.
    def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )

    @property
    def lowercase__ ( self ):
        """Union of tokenizer and image-processor input names, order-preserving."""
        lowerCamelCase_ =self.tokenizer.model_input_names
        lowerCamelCase_ =self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
676
1
'''simple docstring''' from __future__ import annotations a_ : str = { """A""": ["""B""", """C""", """E"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F""", """G"""], """D""": ["""B"""], """E""": ["""A""", """B""", """D"""], """F""": ["""C"""], """G""": ["""C"""], } class __UpperCamelCase : def __init__( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =graph # mapping node to its parent in resulting breadth first tree lowerCamelCase_ ={} lowerCamelCase_ =source_vertex def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={self.source_vertex} lowerCamelCase_ =None lowerCamelCase_ =[self.source_vertex] # first in first out queue while queue: lowerCamelCase_ =queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(lowerCAmelCase ) lowerCamelCase_ =vertex queue.append(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" if target_vertex == self.source_vertex: return self.source_vertex lowerCamelCase_ =self.parent.get(lowerCAmelCase ) if target_vertex_parent is None: lowerCamelCase_ =( f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}''' ) raise ValueError(lowerCAmelCase ) return self.shortest_path(lowerCAmelCase ) + f'''->{target_vertex}''' if __name__ == "__main__": a_ : Optional[Any] = Graph(graph, """G""") g.breath_first_search() print(g.shortest_path("""D""")) print(g.shortest_path("""G""")) print(g.shortest_path("""Foo"""))
676
'''simple docstring'''
# Image-to-text pipeline (image captioning / conditional generation).
#
# NOTE(review): identifiers were machine-mangled -- parameters became
# `lowerCAmelCase` (duplicated in several signatures, which is a SyntaxError)
# and assignment targets became `lowerCamelCase_` -- so names like `prompt`,
# `generate_kwargs`, `model_inputs`, `records` are read but never bound here.
# Code is preserved unchanged; restore real names from upstream history.
from typing import List, Union
from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image
    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch
    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

# Module-level logger.
a_ : Optional[Any] = logging.get_logger(__name__)


@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
    def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """Require the vision backend and restrict to vision-to-sequence models."""
        super().__init__(*lowerCAmelCase, **lowerCAmelCase )
        requires_backends(self, '''vision''' )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )

    def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None ):
        """Split user kwargs into (preprocess, forward, postprocess) dicts.

        Rejects `max_new_tokens` being given both directly and inside
        `generate_kwargs`.
        """
        lowerCamelCase_ ={}
        lowerCamelCase_ ={}
        if prompt is not None:
            lowerCamelCase_ =prompt
        if generate_kwargs is not None:
            lowerCamelCase_ =generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                lowerCamelCase_ ={}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
                    ''' please use only one''' )
            lowerCamelCase_ =max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__( self, lowerCAmelCase, **lowerCAmelCase ):
        """Delegate to the generic Pipeline call path."""
        return super().__call__(lowerCAmelCase, **lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
        """Load the image and build model inputs, per model family.

        With a prompt: "git" prepends the CLS token to tokenized prompt ids,
        "pix2struct" passes the prompt as rendered header text, other models
        (except vision-encoder-decoder, which rejects prompts) tokenize the
        prompt alongside the image features.
        """
        lowerCamelCase_ =load_image(lowerCAmelCase )
        if prompt is not None:
            if not isinstance(lowerCAmelCase, lowerCAmelCase ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(lowerCAmelCase )} - but expected a single string. '''
                    '''Note also that one single text can be provided for conditional image to text generation.''' )

            lowerCamelCase_ =self.model.config.model_type

            if model_type == "git":
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
                lowerCamelCase_ =self.tokenizer(text=lowerCAmelCase, add_special_tokens=lowerCAmelCase ).input_ids
                lowerCamelCase_ =[self.tokenizer.cls_token_id] + input_ids
                lowerCamelCase_ =torch.tensor(lowerCAmelCase ).unsqueeze(0 )
                model_inputs.update({'''input_ids''': input_ids} )
            elif model_type == "pix2struct":
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, header_text=lowerCAmelCase, return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
                lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework )
                model_inputs.update(lowerCAmelCase )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )

        if self.model.config.model_type == "git" and prompt is None:
            lowerCamelCase_ =None

        return model_inputs

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
        """Run `model.generate` on the prepared inputs.

        An input_ids list of all-None entries (the unprompted "git" case) is
        collapsed to None before generation.
        """
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['''input_ids'''], lowerCAmelCase )
            and all(x is None for x in model_inputs['''input_ids'''] )
        ):
            lowerCamelCase_ =None

        if generate_kwargs is None:
            lowerCamelCase_ ={}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        lowerCamelCase_ =model_inputs.pop(self.model.main_input_name )
        lowerCamelCase_ =self.model.generate(lowerCAmelCase, **lowerCAmelCase, **lowerCAmelCase )
        return model_outputs

    def lowercase__ ( self, lowerCAmelCase ):
        """Decode each generated id sequence into a `generated_text` record."""
        lowerCamelCase_ =[]
        for output_ids in model_outputs:
            lowerCamelCase_ ={
                '''generated_text''': self.tokenizer.decode(
                    lowerCAmelCase, skip_special_tokens=lowerCAmelCase, )
            }
            records.append(lowerCAmelCase )
        return records
676
1
'''simple docstring''' from math import pow, sqrt def a_ ( *__snake_case : float ) -> bool: """simple docstring""" lowerCamelCase_ =len(__snake_case ) > 0 and all(value > 0.0 for value in values ) return result def a_ ( __snake_case : float , __snake_case : float ) -> float | ValueError: """simple docstring""" return ( round(sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(__snake_case , __snake_case ) else ValueError('''Input Error: Molar mass values must greater than 0.''' ) ) def a_ ( __snake_case : float , __snake_case : float , __snake_case : float ) -> float | ValueError: """simple docstring""" return ( round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(__snake_case , __snake_case , __snake_case ) else ValueError( '''Input Error: Molar mass and effusion rate values must greater than 0.''' ) ) def a_ ( __snake_case : float , __snake_case : float , __snake_case : float ) -> float | ValueError: """simple docstring""" return ( round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(__snake_case , __snake_case , __snake_case ) else ValueError( '''Input Error: Molar mass and effusion rate values must greater than 0.''' ) ) def a_ ( __snake_case : float , __snake_case : float , __snake_case : float ) -> float | ValueError: """simple docstring""" return ( round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 ) if validate(__snake_case , __snake_case , __snake_case ) else ValueError( '''Input Error: Molar mass and effusion rate values must greater than 0.''' ) ) def a_ ( __snake_case : float , __snake_case : float , __snake_case : float ) -> float | ValueError: """simple docstring""" return ( round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 ) if validate(__snake_case , __snake_case , __snake_case ) else ValueError( '''Input Error: Molar mass and effusion rate values must greater than 0.''' ) )
676
"""Convert a TensorFlow BERT checkpoint to a PyTorch ``BertForPreTraining`` file."""
import argparse

import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Build a BertForPreTraining from `bert_config_file`, load the TF weights,
    and save the resulting state dict to `pytorch_dump_path`.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
676
1
"""Lucas-Lehmer primality test for Mersenne numbers 2**p - 1."""


def lucas_lehmer_test(p: int) -> bool:
    """Return True iff the Mersenne number 2**p - 1 is prime.

    Only meaningful for prime exponents p; p == 2 is special-cased
    (2**2 - 1 == 3 is prime).

    Raises:
        ValueError: if p < 2.
    """
    if p < 2:
        raise ValueError('p should not be less than 2!')
    if p == 2:
        return True

    # Lucas-Lehmer sequence: s_0 = 4, s_{k+1} = s_k**2 - 2 (mod 2**p - 1);
    # 2**p - 1 is prime iff s_{p-2} == 0.
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
676
'''simple docstring'''
# AltCLIP configuration classes (text, vision, and combined).
#
# NOTE(review): identifiers were machine-mangled -- every `__init__` parameter
# became `lowerCAmelCase` (duplicated, which is a SyntaxError) and every
# attribute-assignment target became `lowerCamelCase_` -- so the real
# parameter/attribute names (`vocab_size`, `hidden_size`, ...) are read but
# never bound in this text. Code is preserved unchanged; restore the real
# names from upstream history before running.
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging

# Module-level logger.
a_ : Union[str, Any] = logging.get_logger(__name__)

# Map of known pretrained checkpoints to their config URLs.
a_ : Optional[int] = {
    """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class __UpperCamelCase ( lowerCamelCase__ ):
    # Text-encoder config (XLM-Roberta-style backbone with a projection head).
    lowercase : Optional[Any] ='altclip_text_model'

    def __init__( self, lowerCAmelCase=250_002, lowerCAmelCase=1_024, lowerCAmelCase=24, lowerCAmelCase=16, lowerCAmelCase=4_096, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=514, lowerCAmelCase=1, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-05, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase="absolute", lowerCAmelCase=True, lowerCAmelCase=768, **lowerCAmelCase, ):
        """Store text-model hyperparameters; special token ids go to the base class."""
        super().__init__(pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase )
        lowerCamelCase_ =vocab_size
        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =hidden_dropout_prob
        lowerCamelCase_ =attention_probs_dropout_prob
        lowerCamelCase_ =max_position_embeddings
        lowerCamelCase_ =type_vocab_size
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =initializer_factor
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =position_embedding_type
        lowerCamelCase_ =use_cache
        lowerCamelCase_ =project_dim


class __UpperCamelCase ( lowerCamelCase__ ):
    # Vision-encoder config (ViT-style).
    lowercase : Dict ='altclip_vision_model'

    def __init__( self, lowerCAmelCase=768, lowerCAmelCase=3_072, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3, lowerCAmelCase=224, lowerCAmelCase=32, lowerCAmelCase="quick_gelu", lowerCAmelCase=1e-5, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1.0, **lowerCAmelCase, ):
        """Store vision-model hyperparameters."""
        super().__init__(**lowerCAmelCase )
        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =projection_dim
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =num_channels
        lowerCamelCase_ =patch_size
        lowerCamelCase_ =image_size
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =initializer_factor
        lowerCamelCase_ =attention_dropout
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =hidden_act

    @classmethod
    def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ):
        """Load this config from a pretrained name/path; if the file is a full
        AltCLIP config, extract its nested `vision_config` section."""
        cls._set_token_in_kwargs(lowerCAmelCase )

        lowerCamelCase_, lowerCamelCase_ =cls.get_config_dict(lowerCAmelCase, **lowerCAmelCase )

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('''model_type''' ) == "altclip":
            lowerCamelCase_ =config_dict['''vision_config''']

        if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(lowerCAmelCase, **lowerCAmelCase )


class __UpperCamelCase ( lowerCamelCase__ ):
    # Combined AltCLIP config holding a text config and a vision config.
    lowercase : Dict ='altclip'
    lowercase : str =True

    def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=768, lowerCAmelCase=2.6_5_9_2, **lowerCAmelCase ):
        """Merge optional `*_config_dict` kwargs into the sub-configs, warn on
        conflicting keys, and build the text/vision sub-config objects."""
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        lowerCamelCase_ =kwargs.pop('''text_config_dict''', lowerCAmelCase )
        lowerCamelCase_ =kwargs.pop('''vision_config_dict''', lowerCAmelCase )

        super().__init__(**lowerCAmelCase )

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                lowerCamelCase_ ={}

            # This is the complete result when using `text_config_dict`.
            lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase ).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        lowerCamelCase_ =(
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.''' )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        lowerCamelCase_ =(
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.''' )
                    logger.warning(lowerCAmelCase )

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )

        if vision_config_dict is not None:
            if vision_config is None:
                lowerCamelCase_ ={}

            # This is the complete result when using `vision_config_dict`.
            lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                lowerCamelCase_ ={
                    str(lowerCAmelCase ): value for key, value in _vision_config_dict['''id2label'''].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        lowerCamelCase_ =(
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.''' )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        lowerCamelCase_ =(
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.''' )
                    logger.warning(lowerCAmelCase )

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )

        if text_config is None:
            lowerCamelCase_ ={}
            logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )

        if vision_config is None:
            lowerCamelCase_ ={}
            logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' )

        lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase )
        lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase )

        lowerCamelCase_ =projection_dim
        lowerCamelCase_ =logit_scale_init_value
        lowerCamelCase_ =1.0

    @classmethod
    def lowercase__ ( cls, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ):
        """Alternate constructor from already-built text and vision configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **lowerCAmelCase )

    def lowercase__ ( self ):
        """Serialize to a plain dict, expanding nested sub-configs."""
        lowerCamelCase_ =copy.deepcopy(self.__dict__ )
        lowerCamelCase_ =self.text_config.to_dict()
        lowerCamelCase_ =self.vision_config.to_dict()
        lowerCamelCase_ =self.__class__.model_type
        return output
1
"""Check whether a number is a Krishnamurthy (strong) number: a number equal
to the sum of the factorials of its digits (e.g. 145 = 1! + 4! + 5!)."""


def factorial(digit: int) -> int:
    """Recursive factorial with 0! == 1! == 1 (expects a non-negative int)."""
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """True iff `number` equals the sum of the factorials of its digits."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krisnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        f"""{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number."""
    )
676
"""PyTorch Flaubert model tests (unit + slow integration)."""
import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class FlaubertModelTester(object):
    """Builds tiny Flaubert configs/inputs and shape-checks every model head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a matching tiny config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # duplicate the single sequence across the choice dimension
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
676
1
'''simple docstring''' from typing import List import numpy as np def a_ ( __snake_case : dict ) -> int: """simple docstring""" lowerCamelCase_ ={key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) lowerCamelCase_ =max(lists_lengths.values() , default=0 ) return max(1 , __snake_case ) def a_ ( __snake_case : int , __snake_case : int ) -> List[range]: """simple docstring""" lowerCamelCase_ =[] for group_idx in range(__snake_case ): lowerCamelCase_ =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowerCamelCase_ =shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowerCamelCase_ =range(__snake_case , start + num_shards_to_add ) shards_indices_per_group.append(__snake_case ) return shards_indices_per_group def a_ ( __snake_case : dict , __snake_case : int ) -> List[dict]: """simple docstring""" lowerCamelCase_ =_number_of_shards_in_gen_kwargs(__snake_case ) if num_shards == 1: return [dict(__snake_case )] else: lowerCamelCase_ =_distribute_shards(num_shards=__snake_case , max_num_jobs=__snake_case ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__snake_case , __snake_case ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__snake_case ) ) ] def a_ ( __snake_case : List[dict] ) -> dict: """simple docstring""" 
return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __snake_case ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def a_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> dict: """simple docstring""" lowerCamelCase_ ={len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )} lowerCamelCase_ ={} for size in list_sizes: lowerCamelCase_ =list(range(__snake_case ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowerCamelCase_ =dict(__snake_case ) for key, value in shuffled_kwargs.items(): if isinstance(__snake_case , __snake_case ): lowerCamelCase_ =[value[i] for i in indices_per_size[len(__snake_case )]] return shuffled_kwargs
676
"""Utilities to convert model weights between PyTorch and Flax."""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict

import transformers

from .utils import logging


logger = logging.get_logger(__name__)


def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load a PyTorch checkpoint (single file or sharded) into a Flax state dict."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict


def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PyTorch weight names to the matching Flax names and reshape the tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks if `key` or `(model_prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PT (out, in, kh, kw) -> Flax (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose the weight matrix
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    """Convert a (non-sharded) PyTorch state dict into a nested Flax params dict."""
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)

        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)


def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    """Convert a sharded PyTorch checkpoint (list of .pt files) into a nested Flax params dict."""
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]
            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)

            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)


def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load a Flax msgpack checkpoint file into a PyTorch model."""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load an in-memory Flax params tree into a PyTorch model, renaming keys as needed."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
    # This table only depends on `pt_model_dict`'s key set (which never changes below), so it is
    # computed once here instead of once per weight.
    special_pt_names = {}
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    for key in pt_model_dict:
        key_components = key.split(".")
        name = None
        if key_components[-3::2] == ["parametrizations", "original0"]:
            name = key_components[-2] + "_g"
        elif key_components[-3::2] == ["parametrizations", "original1"]:
            name = key_components[-2] + "_v"
        if name is not None:
            key_components = key_components[:-3] + [name]
            key_to_check = ".".join(key_components)
            special_pt_names[key_to_check] = key

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
1
"""Convert a fairseq XLM-RoBERTa-XL checkpoint to the HuggingFace format."""
import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """
    Copy/paste/tweak the fairseq RoBERTa weights into our HF structure and
    verify both models produce (almost) identical outputs before saving.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
676
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings.

    Characters are taken alternately from ``first_str`` and ``second_str``;
    once the shorter string is exhausted, the remainder of the longer one is
    appended unchanged.

    Args:
        first_str: the string whose characters come first in each pair.
        second_str: the string whose characters come second.

    Returns:
        The interleaved string, e.g. ``("AB", "XYZ") -> "AXBYZ"``.
    """
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    # Walk out to the end of the longer string so no tail is dropped.
    abs_length = max(first_str_length, second_str_length)
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


# Backward-compatible alias for the obfuscated name the old file exported.
a_ = alternative_string_arrange


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
676
1
"""Fine-tune Wav2Vec2 for CTC speech recognition on the Common Voice dataset."""
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union

import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn

import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process

if is_apex_available():
    from apex import amp

# Native AMP (torch.cuda.amp) only exists from torch 1.6 onwards.
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)


def list_field(default=None, metadata=None):
    """Dataclass ``field`` helper for mutable list defaults."""
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to fine-tune."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Propability of each feature vector along the time axis to be chosen as the start of the vector"
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature"
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input for training and eval."""

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )


@dataclass
class DataCollatorCTCWithPadding:
    """Dynamically pad a batch of speech inputs and CTC label sequences.

    Attributes:
        processor: joint feature-extractor/tokenizer used for padding.
        padding: padding strategy forwarded to ``processor.pad``.
        max_length / max_length_labels: optional length caps for inputs/labels.
        pad_to_multiple_of / pad_to_multiple_of_labels: optional alignment.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]:
        # Inputs and labels may have very different lengths, so pad them separately.
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch


class CTCTrainer(Trainer):
    """Trainer whose training step normalizes the CTC loss under multi-GPU."""

    def training_step(self, model: nn.Module, inputs: Dict[str, Any]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            # DataParallel returns one loss per GPU; reduce per the model config.
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()


def main():
    """Parse arguments, build the vocabulary/processor, and run training/eval."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name
    )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=train_dataset.column_names,
    )
    vocab_test = eval_dataset.map(
        extract_all_chars,
        batched=True,
        batch_size=-1,
        keep_in_memory=True,
        remove_columns=eval_dataset.column_names,
    )

    # Union of characters seen in either split; index each to build the CTC vocab.
    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]  # word delimiter replaces the space token
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json",
        unk_token="[UNK]",
        pad_token="[PAD]",
        word_delimiter_token="|",
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True
    )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        activation_dropout=model_args.activation_dropout,
        attention_dropout=model_args.attention_dropout,
        hidden_dropout=model_args.hidden_dropout,
        feat_proj_dropout=model_args.feat_proj_dropout,
        mask_time_prob=model_args.mask_time_prob,
        gradient_checkpointing=training_args.gradient_checkpointing,
        layerdrop=model_args.layerdrop,
        ctc_loss_reduction="mean",
        pad_token_id=processor.tokenizer.pad_token_id,
        vocab_size=len(processor.tokenizer),
    )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    # Common Voice audio is 48kHz; the model expects 16kHz.
    resampler = torchaudio.transforms.Resample(48_000, 16_000)

    # Preprocessing the datasets.
    # We need to read the aduio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        # -100 marks ignored label positions; restore the pad id before decoding.
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results


if __name__ == "__main__":
    main()
676
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lazy-loading package init for the TimmBackbone model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: only register the modeling module when it is installed.
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]

if TYPE_CHECKING:
    # Real imports for static type checkers only; never executed at runtime.
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
1
"""Lazy-loading package init for the GroupViT model (PyTorch and TensorFlow)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Maps submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: only register the PyTorch modeling module when installed.
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # TensorFlow is likewise optional.
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; never executed at runtime.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
import functools


def a_(worda: str, wordb: str) -> int:
    """Compute the Levenshtein (edit) distance between two strings.

    Uses top-down dynamic programming: ``min_distance(i, j)`` is the minimum
    number of single-character insertions, deletions, and substitutions needed
    to turn ``worda[i:]`` into ``wordb[j:]``. Memoized with ``functools.cache``.

    Args:
        worda: source string.
        wordb: target string.

    Returns:
        The edit distance between ``worda`` and ``wordb``.

    >>> a_("intention", "execution")
    5
    >>> a_("", "abc")
    3
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - insert all remaining chars of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all remaining chars of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # 0 if current letters match, else 1
        return min(
            1 + min_distance(indexa + 1, indexb),  # delete from worda
            1 + min_distance(indexa, indexb + 1),  # insert into worda
            diff + min_distance(indexa + 1, indexb + 1),  # substitute (or keep)
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
'''simple docstring''' import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class __UpperCamelCase ( nn.Module ): def __init__( self ): """simple docstring""" super().__init__() lowerCamelCase_ =nn.Linear(3, 4 ) lowerCamelCase_ =nn.BatchNormad(4 ) lowerCamelCase_ =nn.Linear(4, 5 ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase ) ) ) class __UpperCamelCase ( lowerCamelCase__ ): def lowercase__ ( self, lowerCAmelCase, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return (args[0] + 1,) + args[1:], kwargs class __UpperCamelCase ( lowerCamelCase__ ): def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" return output + 1 class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ModelForTest() lowerCamelCase_ =ModelHook() add_hook_to_module(lowerCAmelCase, lowerCAmelCase ) self.assertEqual(test_model._hf_hook, lowerCAmelCase ) self.assertTrue(hasattr(lowerCAmelCase, '''_old_forward''' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__, '''forward''' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ), ['''x'''] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase, '''_hf_hook''' ) ) self.assertFalse(hasattr(lowerCAmelCase, '''_old_forward''' ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ModelForTest() lowerCamelCase_ =ModelHook() add_hook_to_module(lowerCAmelCase, lowerCAmelCase ) add_hook_to_module(lowerCAmelCase, lowerCAmelCase, append=lowerCAmelCase ) self.assertEqual(isinstance(test_model._hf_hook, lowerCAmelCase 
), lowerCAmelCase ) self.assertEqual(len(test_model._hf_hook.hooks ), 2 ) self.assertTrue(hasattr(lowerCAmelCase, '''_old_forward''' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__, '''forward''' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ), ['''x'''] ) remove_hook_from_module(lowerCAmelCase ) self.assertFalse(hasattr(lowerCAmelCase, '''_hf_hook''' ) ) self.assertFalse(hasattr(lowerCAmelCase, '''_old_forward''' ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ModelForTest() lowerCamelCase_ =torch.randn(2, 3 ) lowerCamelCase_ =test_model(x + 1 ) lowerCamelCase_ =test_model(x + 2 ) lowerCamelCase_ =PreForwardHook() add_hook_to_module(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowerCamelCase_ =PreForwardHook() add_hook_to_module(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowerCamelCase_ =SequentialHook(PreForwardHook(), PreForwardHook() ) add_hook_to_module(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase, lowerCAmelCase, atol=1e-5 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ModelForTest() lowerCamelCase_ =torch.randn(2, 3 ) lowerCamelCase_ =test_model(lowerCAmelCase ) lowerCamelCase_ =PostForwardHook() add_hook_to_module(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase, output + 1, atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowerCamelCase_ =PostForwardHook() 
add_hook_to_module(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase, output + 1, atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowerCamelCase_ =SequentialHook(PostForwardHook(), PostForwardHook() ) add_hook_to_module(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =test_model(lowerCAmelCase ) assert torch.allclose(lowerCAmelCase, output + 2, atol=1e-5 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ModelForTest() lowerCamelCase_ =torch.randn(2, 3 ) lowerCamelCase_ =test_model(lowerCAmelCase ) lowerCamelCase_ =PostForwardHook() add_hook_to_module(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =test_model(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase, output + 1 ) ) self.assertTrue(outputa.requires_grad ) lowerCamelCase_ =True lowerCamelCase_ =test_model(lowerCAmelCase ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device, torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device, torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device, torch.device(0 ) ) self.assertEqual(model.lineara.weight.device, torch.device(1 ) ) # We can still make a forward pass. 
The input does not need to be on any particular device lowerCamelCase_ =torch.randn(2, 3 ) lowerCamelCase_ =model(lowerCAmelCase ) self.assertEqual(output.device, torch.device(1 ) ) # We can add a general hook to put back output on same device as input. add_hook_to_module(lowerCAmelCase, AlignDevicesHook(io_same_device=lowerCAmelCase ) ) lowerCamelCase_ =torch.randn(2, 3 ).to(0 ) lowerCamelCase_ =model(lowerCAmelCase ) self.assertEqual(output.device, torch.device(0 ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) # This will move each submodule on different devices lowerCamelCase_ ={'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True} add_hook_to_module(model.lineara, AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.batchnorm, AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.lineara, AlignDevicesHook(**lowerCAmelCase ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device, torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device, torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device, torch.device('''meta''' ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase_ =torch.device(hook_kwargs['''execution_device'''] ) self.assertEqual(model.batchnorm.running_mean.device, lowerCAmelCase ) lowerCamelCase_ =torch.randn(2, 3 ) lowerCamelCase_ =model(lowerCAmelCase ) self.assertEqual(output.device, lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) # Now test with buffers included in the offload lowerCamelCase_ ={ '''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True, '''offload_buffers''': True, } add_hook_to_module(model.lineara, AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.batchnorm, AlignDevicesHook(**lowerCAmelCase ) ) add_hook_to_module(model.lineara, AlignDevicesHook(**lowerCAmelCase ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device, torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device, torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device, torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.running_mean.device, torch.device('''meta''' ) ) lowerCamelCase_ =torch.randn(2, 3 ) lowerCamelCase_ =model(lowerCAmelCase ) self.assertEqual(output.device, lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) # This will move each submodule on different devices lowerCamelCase_ =0 if torch.cuda.is_available() else '''cpu''' attach_align_device_hook(lowerCAmelCase, execution_device=lowerCAmelCase, offload=lowerCAmelCase ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device, torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device, torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device, torch.device('''meta''' ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase_ =torch.device(lowerCAmelCase ) self.assertEqual(model.batchnorm.running_mean.device, lowerCAmelCase ) lowerCamelCase_ =torch.randn(2, 3 ) lowerCamelCase_ =model(lowerCAmelCase ) self.assertEqual(output.device, lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) # Now test with buffers included in the offload attach_align_device_hook(lowerCAmelCase, execution_device=lowerCAmelCase, offload=lowerCAmelCase, offload_buffers=lowerCAmelCase ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device, torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device, torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device, torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.running_mean.device, torch.device('''meta''' ) ) lowerCamelCase_ =torch.randn(2, 3 ) lowerCamelCase_ =model(lowerCAmelCase ) self.assertEqual(output.device, lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) # This will move each submodule on different devices lowerCamelCase_ =0 if torch.cuda.is_available() else '''cpu''' attach_align_device_hook( lowerCAmelCase, execution_device=lowerCAmelCase, offload=lowerCAmelCase, weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device, torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device, torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device, torch.device('''meta''' ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase_ =torch.device(lowerCAmelCase ) self.assertEqual(model.batchnorm.running_mean.device, lowerCAmelCase ) lowerCamelCase_ =torch.randn(2, 3 ) lowerCamelCase_ =model(lowerCAmelCase ) self.assertEqual(output.device, lowerCAmelCase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) # Now test with buffers included in the offload attach_align_device_hook( lowerCAmelCase, execution_device=lowerCAmelCase, offload=lowerCAmelCase, weights_map=model.state_dict(), offload_buffers=lowerCAmelCase, ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device, torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device, torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device, torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.running_mean.device, torch.device('''meta''' ) ) lowerCamelCase_ =torch.randn(2, 3 ) lowerCamelCase_ =model(lowerCAmelCase ) self.assertEqual(output.device, lowerCAmelCase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowerCAmelCase ) self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device, torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device, torch.device('''cpu''' ) )
676
"""Check whether an integer is automorphic (its square ends in the number itself)."""


def a_(number: int) -> bool:
    """Return ``True`` if ``number`` is automorphic, e.g. 5 -> 25, 76 -> 5776.

    Compares ``number`` and ``number**2`` digit by digit from the right; every
    digit of ``number`` must match the corresponding digit of its square.

    :param number: value to test; must be an ``int`` (negatives return False)
    :raises TypeError: if ``number`` is not an integer
    """
    # Fix: the original called isinstance(number, number) — arg 2 must be a
    # type — and its error f-string referenced an undefined name, so every
    # call crashed before doing any work.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        # The trailing digits must agree at every step.
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
"""Archive detection and extraction utilities (tar/gzip/zip/xz/rar/zstd/bz2/7z/lz4)."""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union

from .. import config
from .filelock import FileLock
from .logging import get_logger


logger = get_logger(__name__)


class ExtractManager:
    """Extracts archives into a cache directory, reusing previous extractions."""

    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        # Re-extract when forced, or when no previous (non-empty) output exists.
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        """Extract ``input_path`` if it is a recognized archive; return the output (or original) path."""
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path  # not a recognized archive: pass the path through
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path


class BaseExtractor(ABC):
    """Interface of a single-format extractor."""

    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    """Extractor whose format is detected by the file's leading magic bytes."""

    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)


class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        """Filter tar members, skipping entries that would escape ``output_path`` (path traversal)."""

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)
        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)


class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()


class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class BzipaExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class LzaExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class Extractor:
    """Registry that detects an archive's format by magic number and dispatches extraction."""

    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": BzipaExtractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": LzaExtractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        # Longest magic number among all registered magic-number extractors.
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]):  # <Added version="2.4.0"/>
        """Return the format key (e.g. "gzip") whose extractor recognizes ``path``, else None."""
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
676
"""Backtracking sudoku solver."""
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` may be placed at (row, column): no clash in
    the same row, column, or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    # Top-left corner of the 3x3 box containing (row, column).
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the (row, column) of the first empty cell (value 0), or None if full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking.

    Returns the (mutated) grid on success or None if no solution exists.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # undo and try the next digit
    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
676
1
"""Project Euler 57: square-root convergents of sqrt(2)."""


def a_(n: int = 1000) -> int:
    """Count, among the first ``n`` expansions of the continued fraction for
    sqrt(2), those whose numerator has more digits than the denominator.

    Each expansion is derived from the previous one by
    numerator = prev_numerator + 2 * prev_denominator,
    denominator = prev_numerator + prev_denominator.

    :param n: number of expansions to examine (default 1000, the Euler task)
    """
    # Fix: the original loop iterated over an undefined name and compared a
    # digit count against itself (always False), so it always returned 0.
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f"{a_() = }")
676
"""Informer model configuration."""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """Configuration for an Informer time-series transformer.

    Stores the time-series-specific settings (prediction/context lengths, lags,
    static/dynamic feature counts) and the encoder-decoder transformer
    hyper-parameters, plus the Informer-specific ProbSparse attention options.

    NOTE(review): the original was unparsable (every __init__ parameter shared
    one mangled name); parameter names/order were reconstructed from the body's
    assignment order and the default-value sequence — confirm against callers.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        # Total width of the per-timestep feature vector fed to the model.
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
676
1
"""Tests for the PyTorch ViT-MSN model."""
import inspect
import unittest

from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMSNModelTester:
    """Builds small ViT-MSN configs/inputs and shape-checks model outputs."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print("Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    # NOTE(review): names of these four disabled-feature flags were
    # reconstructed from the upstream ViT-MSN test file — confirm.
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO test image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
676
"""Project Euler 50: longest sum of consecutive primes below a ceiling that is itself prime."""
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return all primes below ``limit`` (sieve of Eratosthenes over odd numbers).

    :param limit: exclusive upper bound; must be > 2
    """
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        # Mark multiples of i starting at 2*i.
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def a_(ceiling: int = 1_000_000) -> int:
    """Return the prime below ``ceiling`` that is the sum of the most
    consecutive primes.

    Fixes vs. the original: the sieve helper shadowed this function's name
    (calling it raised NameError), membership used an O(n) list scan, and each
    window sum was recomputed from scratch — now prefix sums + a set.
    """
    primes = prime_sieve(ceiling)
    prime_set = set(primes)  # O(1) primality lookups for candidate sums

    # prefix[k] = sum of the first k primes, so sum(primes[i:j]) = prefix[j] - prefix[i].
    prefix = [0]
    for p in primes:
        prefix.append(prefix[-1] + p)

    length = 0
    largest = 0
    for i in range(len(primes)):
        # Only windows at least as long as the current best can improve it.
        for j in range(i + length, len(primes)):
            sol = prefix[j] - prefix[i]
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"{a_() = }")
676
1
"""Power iteration for the dominant eigenpair of a symmetric/Hermitian matrix."""
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Return (largest eigenvalue, corresponding unit eigenvector) of ``input_matrix``.

    Fixes vs. the original: every parameter shared one mangled name (a
    SyntaxError), and the companion test called this function by a name that
    did not exist.

    :param input_matrix: square matrix; if complex it must be Hermitian
    :param vector: non-zero starting vector of matching dimension
    :param error_tol: stop when the relative eigenvalue change falls below this
    :param max_iterations: hard cap on iterations
    """
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    """Compare power_iteration against numpy.linalg.eigh on real and Hermitian inputs."""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T  # makes the matrix Hermitian
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
676
# NOTE(review): machine-obfuscated dump of a diffusers DDIM image pipeline.
# Visible structure: __init__ coerces the supplied scheduler to a
# DDIMScheduler via from_config and registers unet + scheduler; __call__
# draws Gaussian noise with randn_tensor, runs the DDIM denoising loop
# (unet forward -> scheduler.step with eta / use_clipped_model_output),
# rescales the result from [-1, 1] to [0, 1], optionally converts to PIL,
# and returns a tuple or ImagePipelineOutput.
# The renamer collapsed distinct argument names into `lowerCAmelCase` and
# locals into `lowerCamelCase_`, so this text is NOT executable as-is;
# it is kept byte-identical for provenance. TODO confirm against the
# upstream diffusers DDIMPipeline source before restoring.
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" super().__init__() # make sure scheduler can always be converted to DDIM lowerCamelCase_ =DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=lowerCAmelCase, scheduler=lowerCAmelCase ) @torch.no_grad() def __call__( self, lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = 0.0, lowerCAmelCase = 50, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, ): """simple docstring""" if isinstance(self.unet.config.sample_size, lowerCAmelCase ): lowerCamelCase_ =( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: lowerCamelCase_ =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) != batch_size: raise ValueError( f'''You have passed a list of generators of length {len(lowerCAmelCase )}, but requested an effective batch''' f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCamelCase_ =randn_tensor(lowerCAmelCase, generator=lowerCAmelCase, device=self.device, dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(lowerCAmelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowerCamelCase_ =self.unet(lowerCAmelCase, lowerCAmelCase ).sample # 2.
predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCamelCase_ =self.scheduler.step( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, eta=lowerCAmelCase, use_clipped_model_output=lowerCAmelCase, generator=lowerCAmelCase ).prev_sample lowerCamelCase_ =(image / 2 + 0.5).clamp(0, 1 ) lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 ).numpy() if output_type == "pil": lowerCamelCase_ =self.numpy_to_pil(lowerCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase )
676
1
# NOTE(review): machine-obfuscated dump of the transformers TFGPT2Tokenizer
# test suite.  Visible structure: a tf.Module (dumped as `__UpperCamelCase`,
# used as `ModelToSave`) whose tf.function `serving` tokenizes a string
# tensor and feeds TFGPT2LMHeadModel, plus a unittest class that compares
# the in-graph tokenizer against the Python GPT2Tokenizer, and checks
# tf.function compilation, a SavedModel save/load round-trip, from_config
# reconstruction, and max_length truncation.
# The renamer collapsed distinct names into `lowerCamelCase_` /
# `lowerCAmelCase`, so this text is NOT executable as-is; kept
# byte-identical for provenance.  TODO confirm against the upstream
# transformers test file before restoring.
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer a_ : Optional[Any] = ["""gpt2"""] a_ : str = """gpt2""" if is_tf_available(): class __UpperCamelCase ( tf.Module ): def __init__( self, lowerCAmelCase ): """simple docstring""" super().__init__() lowerCamelCase_ =tokenizer lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =TFGPTaLMHeadModel.from_config(lowerCAmelCase ) @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name='''text''' ),) ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.tokenizer(lowerCAmelCase ) lowerCamelCase_ =tokenized['''input_ids'''].to_tensor() lowerCamelCase_ =tf.cast(input_ids_dense > 0, tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) lowerCamelCase_ =self.model(input_ids=lowerCAmelCase, attention_mask=lowerCAmelCase )['''logits'''] return outputs @require_tf @require_keras_nlp class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" super().setUp() lowerCamelCase_ =[GPTaTokenizer.from_pretrained(lowerCAmelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)] lowerCamelCase_ =[TFGPTaTokenizer.from_pretrained(lowerCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) lowerCamelCase_ =[ '''This is a straightforward English test sentence.''', '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''', '''Now we\'re going to add some Chinese: 一 二 三 一二三''', '''And some much more rare Chinese: 齉 堃 齉堃''', '''Je vais aussi
écrire en français pour tester les accents''', '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''', ] lowerCamelCase_ =list(zip(self.test_sentences, self.test_sentences[::-1] ) ) def lowercase__ ( self ): """simple docstring""" for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers ): for test_inputs in self.test_sentences: lowerCamelCase_ =tokenizer([test_inputs], return_tensors='''tf''' ) lowerCamelCase_ =tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors lowerCamelCase_ =python_outputs[key].numpy() lowerCamelCase_ =tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(lowerCAmelCase, tf.intaa ) == tf_outputs_values ) ) @slow def lowercase__ ( self ): """simple docstring""" for tf_tokenizer in self.tf_tokenizers: lowerCamelCase_ =tf.function(lowerCAmelCase ) for test_inputs in self.test_sentences: lowerCamelCase_ =tf.constant(lowerCAmelCase ) lowerCamelCase_ =compiled_tokenizer(lowerCAmelCase ) lowerCamelCase_ =tf_tokenizer(lowerCAmelCase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def lowercase__ ( self ): """simple docstring""" for tf_tokenizer in self.tf_tokenizers: lowerCamelCase_ =ModelToSave(tokenizer=lowerCAmelCase ) lowerCamelCase_ =tf.convert_to_tensor([self.test_sentences[0]] ) lowerCamelCase_ =model.serving(lowerCAmelCase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: lowerCamelCase_ =Path(lowerCAmelCase ) / '''saved.model''' tf.saved_model.save(lowerCAmelCase, lowerCAmelCase, signatures={'''serving_default''': model.serving} ) lowerCamelCase_ =tf.saved_model.load(lowerCAmelCase ) lowerCamelCase_ =loaded_model.signatures['''serving_default'''](lowerCAmelCase )['''output_0'''] # We may see small differences because the loaded model is
compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def lowercase__ ( self ): """simple docstring""" for tf_tokenizer in self.tf_tokenizers: lowerCamelCase_ =tf.convert_to_tensor([self.test_sentences[0]] ) lowerCamelCase_ =tf_tokenizer(lowerCAmelCase ) # Build model with some sample inputs lowerCamelCase_ =tf_tokenizer.get_config() lowerCamelCase_ =TFGPTaTokenizer.from_config(lowerCAmelCase ) lowerCamelCase_ =model_from_config(lowerCAmelCase ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def lowercase__ ( self ): """simple docstring""" for tf_tokenizer in self.tf_tokenizers: # for the test to run lowerCamelCase_ =123_123 for max_length in [3, 5, 1_024]: lowerCamelCase_ =tf.convert_to_tensor([self.test_sentences[0]] ) lowerCamelCase_ =tf_tokenizer(lowerCAmelCase, max_length=lowerCAmelCase ) lowerCamelCase_ =out['''input_ids'''].numpy().shape[1] assert out_length == max_length
676
'''Twin-prime helper built on ``maths.prime_check.is_prime``.

De-obfuscation fixes: the dump wrote ``isinstance(__snake_case,
__snake_case)`` (arg 2 must be a type, so every call raised TypeError)
and the error f-string referenced an undefined name ``number``.
'''
from maths.prime_check import is_prime


def a_(number: int) -> int:
    """Return ``number + 2`` when ``(number, number + 2)`` is a twin-prime
    pair, otherwise ``-1``.

    Raises:
        TypeError: if *number* is not an ``int``.
    """
    if not isinstance(number, int):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    # Twin primes differ by exactly 2.
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
'''First- and second-order Shannon entropy of a text over " a-z".

De-obfuscation fixes: the dump named every function ``a_`` while ``main``
and ``calculate_prob`` called ``analyze_text`` (NameError), reused one
loop variable for both halves of a character pair (so only doubled
letters like "aa" were ever counted), and mangled ``math.log2`` into the
non-existent ``math.loga``.
'''
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """Print the single-character entropy, the character-pair entropy, and
    their difference for *text* (each rounded to an integer, printed with
    one decimal place)."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # Total count of tallied single characters (normalising constant).
    all_sum = sum(single_char_strings.values())

    # First-order entropy: -sum p * log2(p) over single characters.
    my_fir_sum = 0.0
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula
    print(F'''{round(-1 * my_fir_sum ):.1f}''')

    # Second-order entropy over adjacent character pairs.
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0.0
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                character_count = two_char_strings[sequence]
                prob = int(character_count) / all_sum
                my_sec_sum += prob * math.log2(prob)
    print(F'''{round(-1 * my_sec_sum ):.1f}''')

    # Difference between the two entropies.
    print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''')


def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single characters and adjacent character pairs of *text*.

    The last character and a synthetic " <first char>" pair are tallied
    separately so every character participates in both counts.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # Pretend the text is preceded by a space so the first char has a pair.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    """Run the module doctests; the sample text demo stays commented out."""
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. "
    #     "Remark easily garret nor nay. Civil those mrs enjoy shy fat merry. ..."
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
676
# NOTE(review): machine-obfuscated dump of diffusers' variance-exploding
# SDE scheduler (score_sde_pytorch lineage, per the DISCLAIMER).  Visible
# pieces: a dataclass output holding two FloatTensors; set_timesteps /
# set_sigmas build a geometric sigma schedule between sigma_min and
# sigma_max; get_adjacent_sigma shifts the schedule by one step;
# step_pred implements the reverse-SDE predictor (eq. 6 of the score-SDE
# paper, per the inline comments); step_correct is the Langevin corrector
# scaled by the snr config; add_noise perturbs samples with per-timestep
# sigmas; __len__ returns num_train_timesteps.
# The renamer collapsed distinct locals into `lowerCamelCase_`, so several
# assignments clobber each other as written — NOT executable as-is; kept
# byte-identical for provenance.  TODO confirm against upstream
# scheduling_sde_ve.py before restoring.
'''simple docstring''' # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class __UpperCamelCase ( lowerCamelCase__ ): lowercase : torch.FloatTensor lowercase : torch.FloatTensor class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): lowercase : Tuple =1 @register_to_config def __init__( self, lowerCAmelCase = 2_000, lowerCAmelCase = 0.1_5, lowerCAmelCase = 0.0_1, lowerCAmelCase = 1_3_4_8.0, lowerCAmelCase = 1e-5, lowerCAmelCase = 1, ): """simple docstring""" lowerCamelCase_ =sigma_max # setable values lowerCamelCase_ =None self.set_sigmas(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" return sample def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None ): """simple docstring""" lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps lowerCamelCase_ =torch.linspace(1, lowerCAmelCase, lowerCAmelCase, device=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None ): """simple docstring""" lowerCamelCase_ =sigma_min if sigma_min is not None else self.config.sigma_min lowerCamelCase_ =sigma_max if sigma_max is not None else self.config.sigma_max lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) lowerCamelCase_ =torch.exp(torch.linspace(math.log(lowerCAmelCase ), math.log(lowerCAmelCase ),
lowerCAmelCase ) ) lowerCamelCase_ =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" return torch.where( timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ): """simple docstring""" if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) lowerCamelCase_ =timestep * torch.ones( sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) lowerCamelCase_ =(timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda lowerCamelCase_ =timesteps.to(self.discrete_sigmas.device ) lowerCamelCase_ =self.discrete_sigmas[timesteps].to(sample.device ) lowerCamelCase_ =self.get_adjacent_sigma(lowerCAmelCase, lowerCAmelCase ).to(sample.device ) lowerCamelCase_ =torch.zeros_like(lowerCAmelCase ) lowerCamelCase_ =(sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods lowerCamelCase_ =diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): lowerCamelCase_ =diffusion.unsqueeze(-1 ) lowerCamelCase_ =drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of lowerCamelCase_ =randn_tensor( sample.shape, layout=sample.layout, generator=lowerCAmelCase, device=sample.device, dtype=sample.dtype ) lowerCamelCase_ =sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise?
lowerCamelCase_ =prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=lowerCAmelCase, prev_sample_mean=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ): """simple docstring""" if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction lowerCamelCase_ =randn_tensor(sample.shape, layout=sample.layout, generator=lowerCAmelCase ).to(sample.device ) # compute step size from the model_output, the noise, and the snr lowerCamelCase_ =torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean() lowerCamelCase_ =torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean() lowerCamelCase_ =(self.config.snr * noise_norm / grad_norm) ** 2 * 2 lowerCamelCase_ =step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term lowerCamelCase_ =step_size.flatten() while len(step_size.shape ) < len(sample.shape ): lowerCamelCase_ =step_size.unsqueeze(-1 ) lowerCamelCase_ =sample + step_size * model_output lowerCamelCase_ =prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =timesteps.to(original_samples.device ) lowerCamelCase_ =self.discrete_sigmas.to(original_samples.device )[timesteps] lowerCamelCase_ =( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None] ) lowerCamelCase_ =noise +
original_samples return noisy_samples def __len__( self ): """simple docstring""" return self.config.num_train_timesteps
676
1
# NOTE(review): machine-obfuscated dump of the transformers LayoutLMv3
# model test file.  Visible structure: a model-tester helper class that
# builds configs/inputs (text tokens + bbox + pixel values, with bbox
# coordinates swapped into legal order) and checks base model, sequence
# classification, token classification and question answering heads; a
# ModelTesterMixin/PipelineTesterMixin test case; and an integration test
# that runs microsoft/layoutlmv3-base on a COCO fixture image and compares
# a logits slice against hard-coded expected values.
# The renamer collapsed distinct names into `lowerCamelCase_` /
# `lowerCAmelCase` (note the tuple-unpacking blocks where every target is
# the same name), so this text is NOT executable as-is; kept byte-identical
# for provenance.  TODO confirm against the upstream
# test_modeling_layoutlmv3.py before restoring.
'''simple docstring''' import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class __UpperCamelCase : def __init__( self, lowerCAmelCase, lowerCAmelCase=2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase=2, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=36, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=16, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=6, lowerCAmelCase=6, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase=None, lowerCAmelCase=1_000, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =num_channels lowerCamelCase_ =image_size lowerCamelCase_ =patch_size lowerCamelCase_ =text_seq_length lowerCamelCase_ =is_training lowerCamelCase_ =use_input_mask lowerCamelCase_ =use_token_type_ids lowerCamelCase_ =use_labels lowerCamelCase_ =vocab_size lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_act lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =attention_probs_dropout_prob lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =type_vocab_size lowerCamelCase_ =type_sequence_label_size lowerCamelCase_ =initializer_range lowerCamelCase_ =coordinate_size lowerCamelCase_ =shape_size lowerCamelCase_ =num_labels lowerCamelCase_ =num_choices lowerCamelCase_ =scope lowerCamelCase_ =range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) lowerCamelCase_ =text_seq_length lowerCamelCase_ =(image_size // patch_size) ** 2 + 1 lowerCamelCase_ =self.text_seq_length + self.image_seq_length def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size ) lowerCamelCase_ =ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowerCamelCase_ =bbox[i, j, 3] lowerCamelCase_ =bbox[i, j, 1] lowerCamelCase_ =t if bbox[i, j, 2] < bbox[i, j, 0]: lowerCamelCase_ =bbox[i, j, 2] lowerCamelCase_ =bbox[i, j, 0] lowerCamelCase_ =t lowerCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ =None if self.use_input_mask: lowerCamelCase_ =random_attention_mask([self.batch_size, self.text_seq_length] ) lowerCamelCase_ =None if self.use_token_type_ids: lowerCamelCase_ =ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size ) lowerCamelCase_ =None lowerCamelCase_ =None if self.use_labels: lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size ) lowerCamelCase_ =ids_tensor([self.batch_size, self.text_seq_length], self.num_labels ) lowerCamelCase_ =LayoutLMvaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =LayoutLMvaModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() # text + image lowerCamelCase_ =model(lowerCAmelCase, pixel_values=lowerCAmelCase ) lowerCamelCase_ =model( lowerCAmelCase, bbox=lowerCAmelCase, pixel_values=lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, bbox=lowerCAmelCase, pixel_values=lowerCAmelCase, token_type_ids=lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, bbox=lowerCAmelCase, pixel_values=lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) # text only lowerCamelCase_ =model(lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only lowerCamelCase_ =model(pixel_values=lowerCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase,
lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.num_labels lowerCamelCase_ =LayoutLMvaForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model( lowerCAmelCase, bbox=lowerCAmelCase, pixel_values=lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.num_labels lowerCamelCase_ =LayoutLMvaForTokenClassification(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model( lowerCAmelCase, bbox=lowerCAmelCase, pixel_values=lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =LayoutLMvaForQuestionAnswering(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model( lowerCAmelCase, bbox=lowerCAmelCase, pixel_values=lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), (
lowerCamelCase_ ), ) =config_and_inputs lowerCamelCase_ ={ '''input_ids''': input_ids, '''bbox''': bbox, '''pixel_values''': pixel_values, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : Tuple =False lowercase : Any =False lowercase : List[str] =False lowercase : Any =( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) lowercase : Any =( {'document-question-answering': LayoutLMvaForQuestionAnswering, 'feature-extraction': LayoutLMvaModel} if is_torch_available() else {} ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" return True def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =LayoutLMvaModelTester(self ) lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, hidden_size=37 ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ): """simple docstring""" lowerCamelCase_ =copy.deepcopy(lowerCAmelCase ) if model_class in get_values(lowerCAmelCase ): lowerCamelCase_ ={ k: v.unsqueeze(1 ).expand(-1, self.model_tester.num_choices, -1 ).contiguous() if isinstance(lowerCAmelCase, torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(lowerCAmelCase ): lowerCamelCase_ =torch.ones(self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase ) elif model_class in get_values(lowerCAmelCase ): lowerCamelCase_ =torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase ) lowerCamelCase_ =torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase ) elif model_class in [ *get_values(lowerCAmelCase ), ]: lowerCamelCase_ =torch.zeros( self.model_tester.batch_size,
dtype=torch.long, device=lowerCAmelCase ) elif model_class in [ *get_values(lowerCAmelCase ), ]: lowerCamelCase_ =torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=lowerCAmelCase, ) return inputs_dict def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase_ =type self.model_tester.create_and_check_model(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =LayoutLMvaModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) def a_ ( ) -> Dict: """simple docstring""" lowerCamelCase_ =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch class __UpperCamelCase ( unittest.TestCase ): @cached_property def lowercase__ ( self ): """simple docstring""" return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase ) if is_vision_available() else None @slow def lowercase__ ( self ): """simple docstring"""
lowerCamelCase_ =LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(lowerCAmelCase ) lowerCamelCase_ =self.default_image_processor lowerCamelCase_ =prepare_img() lowerCamelCase_ =image_processor(images=lowerCAmelCase, return_tensors='''pt''' ).pixel_values.to(lowerCAmelCase ) lowerCamelCase_ =torch.tensor([[1, 2]] ) lowerCamelCase_ =torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass lowerCamelCase_ =model( input_ids=input_ids.to(lowerCAmelCase ), bbox=bbox.to(lowerCAmelCase ), pixel_values=pixel_values.to(lowerCAmelCase ), ) # verify the logits lowerCamelCase_ =torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape, lowerCAmelCase ) lowerCamelCase_ =torch.tensor( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], lowerCAmelCase, atol=1e-4 ) )
676
'''FizzBuzz returned as one space-separated string.

De-obfuscation fixes: the dump declared both parameters with the same
name ``__snake_case`` (a SyntaxError in Python) and the body referenced
the undefined names ``number``/``iterations``.  All runtime strings are
kept byte-identical to the original.
'''


def a_(number: int, iterations: int) -> str:
    """Play FizzBuzz from *number* up to and including *iterations*.

    Each entry ("Fizz", "Buzz", "FizzBuzz" or the number itself) is
    followed by a single space, so the result keeps a trailing space.

    Raises:
        ValueError: if either argument is not an ``int``, if *number* < 1,
            or if *iterations* < 1.
    """
    if not isinstance(iterations, int):
        raise ValueError('''iterations must be defined as integers''')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            '''starting number must be and integer and be more than 0'''
        )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''')

    out = ''''''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        # Not divisible by 3 or 5: emit the number itself.
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
# NOTE(review): this module looks machine-obfuscated. Several names are referenced but
# never defined (`Tuple`, `x`, `add_two`, `lowerCAmelCase`, `result`), so importing or
# running it raises NameError — verify against the original transformers test file.
'''Tests for the transformers restricted Python interpreter (`evaluate`).'''
import unittest

from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate


def a_ ( __snake_case : Tuple ) -> int:
    """Tool helper: should add 2 to its argument."""
    # NOTE(review): returns `x`, not the parameter `__snake_case` — `x` is undefined here.
    return x + 2


class __UpperCamelCase ( unittest.TestCase ):
    # NOTE(review): every method is named `lowercase__`, so later definitions shadow
    # earlier ones, and none match unittest's `test*` discovery pattern — nothing runs.

    def lowercase__ ( self ):
        """Evaluate a constant assignment and an assignment from state."""
        lowerCamelCase_ ='''x = 3'''
        lowerCamelCase_ ={}
        lowerCamelCase_ =evaluate(lowerCAmelCase, {}, state=lowerCAmelCase )
        assert result == 3
        self.assertDictEqual(lowerCAmelCase, {'''x''': 3} )

        lowerCamelCase_ ='''x = y'''
        lowerCamelCase_ ={'''y''': 5}
        lowerCamelCase_ =evaluate(lowerCAmelCase, {}, state=lowerCAmelCase )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(lowerCAmelCase, {'''x''': 5, '''y''': 5} )

    def lowercase__ ( self ):
        """Evaluate a call to a registered tool, and the failure path without it."""
        lowerCamelCase_ ='''y = add_two(x)'''
        lowerCamelCase_ ={'''x''': 3}
        lowerCamelCase_ =evaluate(lowerCAmelCase, {'''add_two''': add_two}, state=lowerCAmelCase )
        assert result == 5
        self.assertDictEqual(lowerCAmelCase, {'''x''': 3, '''y''': 5} )

        # Won't work without the tool
        with CaptureStdout() as out:
            lowerCamelCase_ =evaluate(lowerCAmelCase, {}, state=lowerCAmelCase )
        assert result is None
        assert "tried to execute add_two" in out.out

    def lowercase__ ( self ):
        """Evaluate a single constant assignment."""
        lowerCamelCase_ ='''x = 3'''
        lowerCamelCase_ ={}
        lowerCamelCase_ =evaluate(lowerCAmelCase, {}, state=lowerCAmelCase )
        assert result == 3
        self.assertDictEqual(lowerCAmelCase, {'''x''': 3} )

    def lowercase__ ( self ):
        """Evaluate a dict literal whose values mix state lookups and tool calls."""
        lowerCamelCase_ ='''test_dict = {\'x\': x, \'y\': add_two(x)}'''
        lowerCamelCase_ ={'''x''': 3}
        lowerCamelCase_ =evaluate(lowerCAmelCase, {'''add_two''': add_two}, state=lowerCAmelCase )
        self.assertDictEqual(lowerCAmelCase, {'''x''': 3, '''y''': 5} )
        self.assertDictEqual(lowerCAmelCase, {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )

    def lowercase__ ( self ):
        """Evaluate two statements; the value of the last one is returned."""
        lowerCamelCase_ ='''x = 3\ny = 5'''
        lowerCamelCase_ ={}
        lowerCamelCase_ =evaluate(lowerCAmelCase, {}, state=lowerCAmelCase )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(lowerCAmelCase, {'''x''': 3, '''y''': 5} )

    def lowercase__ ( self ):
        """Evaluate an f-string that interpolates a state variable."""
        lowerCamelCase_ ='''text = f\'This is x: {x}.\''''
        lowerCamelCase_ ={'''x''': 3}
        lowerCamelCase_ =evaluate(lowerCAmelCase, {}, state=lowerCAmelCase )
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(lowerCAmelCase, {'''x''': 3, '''text''': '''This is x: 3.'''} )

    def lowercase__ ( self ):
        """Evaluate an if/else, taking each branch in turn."""
        lowerCamelCase_ ='''if x <= 3:\n    y = 2\nelse:\n    y = 5'''
        lowerCamelCase_ ={'''x''': 3}
        lowerCamelCase_ =evaluate(lowerCAmelCase, {}, state=lowerCAmelCase )
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(lowerCAmelCase, {'''x''': 3, '''y''': 2} )

        lowerCamelCase_ ={'''x''': 8}
        lowerCamelCase_ =evaluate(lowerCAmelCase, {}, state=lowerCAmelCase )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(lowerCAmelCase, {'''x''': 8, '''y''': 5} )

    def lowercase__ ( self ):
        """Evaluate a list literal with a tool call inside."""
        lowerCamelCase_ ='''test_list = [x, add_two(x)]'''
        lowerCamelCase_ ={'''x''': 3}
        lowerCamelCase_ =evaluate(lowerCAmelCase, {'''add_two''': add_two}, state=lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase, [3, 5] )
        self.assertDictEqual(lowerCAmelCase, {'''x''': 3, '''test_list''': [3, 5]} )

    def lowercase__ ( self ):
        """Evaluate a plain variable-to-variable assignment."""
        lowerCamelCase_ ='''y = x'''
        lowerCamelCase_ ={'''x''': 3}
        lowerCamelCase_ =evaluate(lowerCAmelCase, {}, state=lowerCAmelCase )
        assert result == 3
        self.assertDictEqual(lowerCAmelCase, {'''x''': 3, '''y''': 3} )

    def lowercase__ ( self ):
        """Evaluate list and dict subscripting on freshly-built containers."""
        lowerCamelCase_ ='''test_list = [x, add_two(x)]\ntest_list[1]'''
        lowerCamelCase_ ={'''x''': 3}
        lowerCamelCase_ =evaluate(lowerCAmelCase, {'''add_two''': add_two}, state=lowerCAmelCase )
        assert result == 5
        self.assertDictEqual(lowerCAmelCase, {'''x''': 3, '''test_list''': [3, 5]} )

        lowerCamelCase_ ='''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'''
        lowerCamelCase_ ={'''x''': 3}
        lowerCamelCase_ =evaluate(lowerCAmelCase, {'''add_two''': add_two}, state=lowerCAmelCase )
        assert result == 5
        self.assertDictEqual(lowerCAmelCase, {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )

    def lowercase__ ( self ):
        """Evaluate a for loop; the loop variable leaks into state by design."""
        lowerCamelCase_ ='''x = 0\nfor i in range(3):\n    x = i'''
        lowerCamelCase_ ={}
        lowerCamelCase_ =evaluate(lowerCAmelCase, {'''range''': range}, state=lowerCAmelCase )
        assert result == 2
        self.assertDictEqual(lowerCAmelCase, {'''x''': 2, '''i''': 2} )
676
'''simple docstring''' from typing import List import numpy as np def a_ ( __snake_case : dict ) -> int: """simple docstring""" lowerCamelCase_ ={key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) lowerCamelCase_ =max(lists_lengths.values() , default=0 ) return max(1 , __snake_case ) def a_ ( __snake_case : int , __snake_case : int ) -> List[range]: """simple docstring""" lowerCamelCase_ =[] for group_idx in range(__snake_case ): lowerCamelCase_ =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowerCamelCase_ =shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowerCamelCase_ =range(__snake_case , start + num_shards_to_add ) shards_indices_per_group.append(__snake_case ) return shards_indices_per_group def a_ ( __snake_case : dict , __snake_case : int ) -> List[dict]: """simple docstring""" lowerCamelCase_ =_number_of_shards_in_gen_kwargs(__snake_case ) if num_shards == 1: return [dict(__snake_case )] else: lowerCamelCase_ =_distribute_shards(num_shards=__snake_case , max_num_jobs=__snake_case ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__snake_case , __snake_case ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__snake_case ) ) ] def a_ ( __snake_case : List[dict] ) -> dict: """simple docstring""" 
return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __snake_case ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def a_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> dict: """simple docstring""" lowerCamelCase_ ={len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )} lowerCamelCase_ ={} for size in list_sizes: lowerCamelCase_ =list(range(__snake_case ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowerCamelCase_ =dict(__snake_case ) for key, value in shuffled_kwargs.items(): if isinstance(__snake_case , __snake_case ): lowerCamelCase_ =[value[i] for i in indices_per_size[len(__snake_case )]] return shuffled_kwargs
676
1
'''simple docstring''' from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # 
noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
676
'''Binarize a text corpus: tokenize each line and dump the token-id lists to a pickle.

Fixes over the previous revision (which raised NameError on every code path):
the module logger was assigned to `a_` but used as `logger`; the `__main__` guard
called an undefined `main()`; `type=__snake_case`, `add_special_tokens=__snake_case`,
`iter`, `np.uintaa`/`np.intaa` and the `Union` annotation were all undefined names,
restored here from the statements' evident intent.
'''
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer


logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)


def a_() -> None:
    """Parse CLI args, tokenize `--file_path` line by line and pickle the id arrays.

    Output dtype is uint16 when the vocab fits in 16 bits, int32 otherwise; the
    examples are shuffled before dumping.
    """
    parser = argparse.ArgumentParser(
        description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).'''
    )
    parser.add_argument('''--file_path''', type=str, default='''data/dump.txt''', help='''The path to the data.''')
    parser.add_argument('''--tokenizer_type''', type=str, default='''bert''', choices=['''bert''', '''roberta''', '''gpt2'''])
    parser.add_argument('''--tokenizer_name''', type=str, default='''bert-base-uncased''', help='''The tokenizer to use.''')
    parser.add_argument('''--dump_file''', type=str, default='''data/dump''', help='''The dump file prefix.''')
    args = parser.parse_args()

    logger.info(f'''Loading Tokenizer ({args.tokenizer_name})''')
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['''cls_token''']  # `[CLS]`
        sep = tokenizer.special_tokens_map['''sep_token''']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['''cls_token''']  # `<s>`
        sep = tokenizer.special_tokens_map['''sep_token''']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map['''bos_token''']  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['''eos_token''']  # `<|endoftext|>`

    logger.info(f'''Loading text from {args.file_path}''')
    with open(args.file_path, '''r''', encoding='''utf8''') as fp:
        data = fp.readlines()

    logger.info('''Start encoding''')
    logger.info(f'''{len(data)} examples to process.''')

    rslt = []
    iter_count = 0
    interval = 1_0000  # log progress every `interval` examples
    start = time.time()
    for text in data:
        text = f'''{bos} {text.strip()} {sep}'''
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter_count += 1
        if iter_count % interval == 0:
            end = time.time()
            logger.info(f'''{iter_count} examples processed. - {(end-start):.2f}s/{interval}expl''')
            start = time.time()
    logger.info('''Finished binarization''')
    logger.info(f'''{len(rslt)} examples processed.''')

    dp_file = f'''{args.dump_file}.{args.tokenizer_name}.pickle'''
    vocab_size = tokenizer.vocab_size
    # uint16 halves the file size whenever the vocabulary fits in 16 bits
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f'''Dump to {dp_file}''')
    with open(dp_file, '''wb''') as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    a_()
676
1
# NOTE(review): this module looks machine-obfuscated. The second, third and fourth
# functions each declare two/three parameters all named `__snake_case` — a SyntaxError —
# and the bodies reference names that are never bound (`checkpoint_url`, `config`,
# `name`, `orig_state_dict`, `key_split`, `val`, `model`, `processor`, `parser`,
# `args`, `convert_swinasr_checkpoint`, `torch.floataa` …). Verify against the
# original transformers Swin2SR conversion script before use.
'''Convert original Swin2SR checkpoints to the HuggingFace format.'''
import argparse

import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor

from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor


def a_ ( __snake_case : str ) -> int:
    """Build a SwinaSRConfig whose fields depend on which checkpoint URL is given."""
    lowerCamelCase_ =SwinaSRConfig()

    # NOTE(review): `checkpoint_url` is undefined — the parameter is named `__snake_case`.
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        lowerCamelCase_ =4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        lowerCamelCase_ =4
        lowerCamelCase_ =48
        lowerCamelCase_ ='''pixelshuffle_aux'''
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        lowerCamelCase_ =[6, 6, 6, 6]
        lowerCamelCase_ =60
        lowerCamelCase_ =[6, 6, 6, 6]
        lowerCamelCase_ ='''pixelshuffledirect'''
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        lowerCamelCase_ =4
        lowerCamelCase_ ='''nearest+conv'''
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        lowerCamelCase_ =1
        lowerCamelCase_ =1
        lowerCamelCase_ =126
        lowerCamelCase_ =7
        lowerCamelCase_ =2_5_5.0
        lowerCamelCase_ =''''''

    # NOTE(review): `config` is never bound — all assignments above went to `lowerCamelCase_`.
    return config


# NOTE(review): duplicate parameter name `__snake_case` — SyntaxError as written.
def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> List[Any]:
    """Map one original state-dict key (`name`) to the HuggingFace naming scheme."""
    if "patch_embed.proj" in name and "layers" not in name:
        lowerCamelCase_ =name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        lowerCamelCase_ =name.replace('''patch_embed.norm''' , '''embeddings.patch_embeddings.layernorm''' )
    if "layers" in name:
        lowerCamelCase_ =name.replace('''layers''' , '''encoder.stages''' )
    if "residual_group.blocks" in name:
        lowerCamelCase_ =name.replace('''residual_group.blocks''' , '''layers''' )
    if "attn.proj" in name:
        lowerCamelCase_ =name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        lowerCamelCase_ =name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        lowerCamelCase_ =name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        lowerCamelCase_ =name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        lowerCamelCase_ =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        lowerCamelCase_ =name.replace('''mlp.fc2''' , '''output.dense''' )
    if "q_bias" in name:
        lowerCamelCase_ =name.replace('''q_bias''' , '''query.bias''' )
    if "k_bias" in name:
        lowerCamelCase_ =name.replace('''k_bias''' , '''key.bias''' )
    if "v_bias" in name:
        lowerCamelCase_ =name.replace('''v_bias''' , '''value.bias''' )
    if "cpb_mlp" in name:
        lowerCamelCase_ =name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
    if "patch_embed.proj" in name:
        lowerCamelCase_ =name.replace('''patch_embed.proj''' , '''patch_embed.projection''' )

    if name == "norm.weight":
        lowerCamelCase_ ='''layernorm.weight'''
    if name == "norm.bias":
        lowerCamelCase_ ='''layernorm.bias'''

    if "conv_first" in name:
        lowerCamelCase_ =name.replace('''conv_first''' , '''first_convolution''' )

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            lowerCamelCase_ =name.replace('''conv_last''' , '''final_convolution''' )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                lowerCamelCase_ =name.replace('''conv_before_upsample.0''' , '''conv_before_upsample''' )
            if "upsample.0" in name:
                lowerCamelCase_ =name.replace('''upsample.0''' , '''upsample.convolution_0''' )
            if "upsample.2" in name:
                lowerCamelCase_ =name.replace('''upsample.2''' , '''upsample.convolution_1''' )
            lowerCamelCase_ ='''upsample.''' + name
        elif config.upsampler == "pixelshuffledirect":
            lowerCamelCase_ =name.replace('''upsample.0.weight''' , '''upsample.conv.weight''' )
            lowerCamelCase_ =name.replace('''upsample.0.bias''' , '''upsample.conv.bias''' )
        else:
            pass
    else:
        lowerCamelCase_ ='''swin2sr.''' + name

    return name


# NOTE(review): duplicate parameter name `__snake_case` — SyntaxError as written.
def a_ ( __snake_case : Tuple , __snake_case : List[str] ) -> Union[str, Any]:
    """Rewrite a whole original state dict, splitting fused qkv weights into q/k/v."""
    for key in orig_state_dict.copy().keys():
        lowerCamelCase_ =orig_state_dict.pop(__snake_case )

        if "qkv" in key:
            lowerCamelCase_ =key.split('''.''' )
            lowerCamelCase_ =int(key_split[1] )
            lowerCamelCase_ =int(key_split[4] )
            lowerCamelCase_ =config.embed_dim

            if "weight" in key:
                # fused qkv: first `dim` rows are q, next `dim` are k, last `dim` are v
                lowerCamelCase_ =val[:dim, :]
                lowerCamelCase_ =val[dim : dim * 2, :]
                lowerCamelCase_ =val[-dim:, :]
            else:
                lowerCamelCase_ =val[:dim]
                lowerCamelCase_ =val[dim : dim * 2]
                lowerCamelCase_ =val[-dim:]
            pass
        else:
            lowerCamelCase_ =val

    return orig_state_dict


# NOTE(review): three parameters all named `__snake_case` — SyntaxError as written.
def a_ ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : int ) -> List[str]:
    """Download a checkpoint, convert it, verify a few output values, then save/push."""
    lowerCamelCase_ =get_config(__snake_case )
    lowerCamelCase_ =SwinaSRForImageSuperResolution(__snake_case )
    model.eval()

    lowerCamelCase_ =torch.hub.load_state_dict_from_url(__snake_case , map_location='''cpu''' )
    lowerCamelCase_ =convert_state_dict(__snake_case , __snake_case )
    lowerCamelCase_, lowerCamelCase_ =model.load_state_dict(__snake_case , strict=__snake_case )

    if len(__snake_case ) > 0:
        raise ValueError('''Missing keys when converting: {}'''.format(__snake_case ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f'''Unexpected key {key} in state_dict''' )

    # verify values
    lowerCamelCase_ ='''https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'''
    lowerCamelCase_ =Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert('''RGB''' )
    lowerCamelCase_ =SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    lowerCamelCase_ =126 if '''Jpeg''' in checkpoint_url else 256
    lowerCamelCase_ =Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ]
    )
    lowerCamelCase_ =transforms(__snake_case ).unsqueeze(0 )

    if config.num_channels == 1:
        lowerCamelCase_ =pixel_values[:, 0, :, :].unsqueeze(1 )

    lowerCamelCase_ =model(__snake_case )

    # assert values: one expected output slice per known checkpoint
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        lowerCamelCase_ =torch.Size([1, 3, 512, 512] )
        lowerCamelCase_ =torch.tensor(
            [[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        lowerCamelCase_ =torch.Size([1, 3, 1024, 1024] )
        lowerCamelCase_ =torch.tensor(
            [[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        lowerCamelCase_ =torch.Size([1, 3, 1024, 1024] )
        lowerCamelCase_ =torch.tensor(
            [[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        lowerCamelCase_ =torch.Size([1, 3, 512, 512] )
        lowerCamelCase_ =torch.tensor(
            [[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        lowerCamelCase_ =torch.Size([1, 3, 1024, 1024] )
        lowerCamelCase_ =torch.tensor(
            [[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , __snake_case , atol=1e-3 )
    print('''Looks ok!''' )

    lowerCamelCase_ ={
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''': (
            '''swin2SR-classical-sr-x2-64'''
        ),
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth''': (
            '''swin2SR-classical-sr-x4-64'''
        ),
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth''': (
            '''swin2SR-compressed-sr-x4-48'''
        ),
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth''': (
            '''swin2SR-lightweight-x2-64'''
        ),
        '''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth''': (
            '''swin2SR-realworld-sr-x4-64-bsrgan-psnr'''
        ),
    }
    lowerCamelCase_ =url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(__snake_case )
        print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(__snake_case )

    if push_to_hub:
        model.push_to_hub(f'''caidas/{model_name}''' )
        processor.push_to_hub(f'''caidas/{model_name}''' )


if __name__ == "__main__":
    a_ : Tuple = argparse.ArgumentParser()
    # Required parameters
    # NOTE(review): `parser`, `args` and `convert_swinasr_checkpoint` are undefined —
    # the parser above was bound to `a_` and the conversion function is also named `a_`.
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
        type=str,
        help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")

    a_ : List[str] = parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
676
'''MVP model configuration.

Fixes over the previous revision: every `__init__` parameter was named
`lowerCAmelCase` (a SyntaxError from duplicate argument names), the base class
was the undefined name `lowerCamelCase__`, and the three class attributes were
all named `lowercase`. Parameter/attribute names are restored from the default
values and the attribute assignments visible in the body.
'''
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a_ = logging.get_logger(__name__)

# NOTE(review): the previous revision rebound `a_` from the logger to this map;
# the map is kept as the module-level value of record.
a_ = {
    """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}


class __UpperCamelCase(PretrainedConfig):
    """Configuration for MVP encoder-decoder models.

    Stores the architecture hyper-parameters (layer counts, hidden sizes,
    dropout rates, prompt settings) consumed by the MVP model classes.
    """

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_267,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.0_2,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        """Build the config; token-id and generation kwargs are forwarded to the base class."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Legacy escape hatch: old configs signalled a forced BOS via this kwarg.
        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''', False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
                '''The config can simply be saved and uploaded again to be fixed.'''
            )
676
1
# NOTE(review): this module looks machine-obfuscated. The free function and every
# method declare several parameters all named `__snake_case`/`lowerCAmelCase` — a
# SyntaxError — the base classes are the undefined `lowerCamelCase__`, and bodies
# reference unbound names (`alpha_transform_type`, `betas`, `trained_betas`,
# `schedule_timesteps`, `timestep`, `sample`, `torch.floataa`, …). Verify against
# the original diffusers Heun discrete scheduler before use.
'''Heun (2nd-order) discrete scheduler, Karras et al. style.'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


# NOTE(review): duplicate parameter name `__snake_case` — SyntaxError as written.
def a_ ( __snake_case : int , __snake_case : Optional[Any]=0.9_9_9 , __snake_case : Union[str, Any]="cosine" , ) -> Any:
    """Build a beta schedule from a cumulative-alpha ("alpha bar") function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(__snake_case : Any ):
            return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(__snake_case : Dict ):
            return math.exp(t * -1_2.0 )

    else:
        raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )

    lowerCamelCase_ =[]
    for i in range(__snake_case ):
        lowerCamelCase_ =i / num_diffusion_timesteps
        lowerCamelCase_ =(i + 1) / num_diffusion_timesteps
        # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta
        betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ) , __snake_case ) )
    return torch.tensor(__snake_case , dtype=torch.floataa )


# NOTE(review): base classes `lowerCamelCase__` are undefined (upstream uses
# SchedulerMixin, ConfigMixin); class attrs are both named `lowercase` and shadow.
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    lowercase : str =[e.name for e in KarrasDiffusionSchedulers]  # compatible scheduler names
    lowercase : List[str] =2  # solver order

    # NOTE(review): all parameters named `lowerCAmelCase` — SyntaxError as written.
    @register_to_config
    def __init__( self, lowerCAmelCase = 1_000, lowerCAmelCase = 0.0_0_0_8_5, lowerCAmelCase = 0.0_1_2, lowerCAmelCase = "linear", lowerCAmelCase = None, lowerCAmelCase = "epsilon", lowerCAmelCase = False, lowerCAmelCase = False, lowerCAmelCase = 1.0, lowerCAmelCase = "linspace", lowerCAmelCase = 0, ):
        """Create the beta/alpha tables and initialise the timestep schedule."""
        if trained_betas is not None:
            lowerCamelCase_ =torch.tensor(lowerCAmelCase, dtype=torch.floataa )
        elif beta_schedule == "linear":
            lowerCamelCase_ =torch.linspace(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            lowerCamelCase_ =(
                torch.linspace(beta_start**0.5, beta_end**0.5, lowerCAmelCase, dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            lowerCamelCase_ =betas_for_alpha_bar(lowerCAmelCase, alpha_transform_type='''cosine''' )
        elif beta_schedule == "exp":
            lowerCamelCase_ =betas_for_alpha_bar(lowerCAmelCase, alpha_transform_type='''exp''' )
        else:
            raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )

        lowerCamelCase_ =1.0 - self.betas
        lowerCamelCase_ =torch.cumprod(self.alphas, dim=0 )

        # set all values
        self.set_timesteps(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
        lowerCamelCase_ =use_karras_sigmas

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
        """Locate the schedule index for a given timestep, disambiguating duplicates."""
        if schedule_timesteps is None:
            lowerCamelCase_ =self.timesteps

        lowerCamelCase_ =(schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            lowerCamelCase_ =1 if len(lowerCAmelCase ) > 1 else 0
        else:
            lowerCamelCase_ =timestep.cpu().item() if torch.is_tensor(lowerCAmelCase ) else timestep
            lowerCamelCase_ =self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def lowercase__ ( self ):
        """Standard deviation of the initial noise distribution."""
        # "linspace" and "trailing" schedules start at sigma_max directly
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, ):
        """Scale the model input by 1/sqrt(sigma^2 + 1) for the current timestep."""
        lowerCamelCase_ =self.index_for_timestep(lowerCAmelCase )

        lowerCamelCase_ =self.sigmas[step_index]
        lowerCamelCase_ =sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, ):
        """Compute the discrete timesteps and (duplicated) sigma table for inference."""
        lowerCamelCase_ =num_inference_steps

        lowerCamelCase_ =num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            lowerCamelCase_ =np.linspace(0, num_train_timesteps - 1, lowerCAmelCase, dtype=lowerCAmelCase )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            lowerCamelCase_ =num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            lowerCamelCase_ =(np.arange(0, lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(lowerCAmelCase )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            lowerCamelCase_ =num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            lowerCamelCase_ =(np.arange(lowerCAmelCase, 0, -step_ratio )).round().copy().astype(lowerCAmelCase )
            timesteps -= 1
        else:
            raise ValueError(
                f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )

        lowerCamelCase_ =np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        lowerCamelCase_ =np.log(lowerCAmelCase )
        lowerCamelCase_ =np.interp(lowerCAmelCase, np.arange(0, len(lowerCAmelCase ) ), lowerCAmelCase )

        if self.config.use_karras_sigmas:
            lowerCamelCase_ =self._convert_to_karras(in_sigmas=lowerCAmelCase, num_inference_steps=self.num_inference_steps )
            lowerCamelCase_ =np.array([self._sigma_to_t(lowerCAmelCase, lowerCAmelCase ) for sigma in sigmas] )

        lowerCamelCase_ =np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
        lowerCamelCase_ =torch.from_numpy(lowerCAmelCase ).to(device=lowerCAmelCase )
        # duplicate interior sigmas: Heun evaluates the model twice per step
        lowerCamelCase_ =torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )

        lowerCamelCase_ =torch.from_numpy(lowerCAmelCase )
        lowerCamelCase_ =torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )

        if str(lowerCAmelCase ).startswith('''mps''' ):
            # mps does not support float64
            lowerCamelCase_ =timesteps.to(lowerCAmelCase, dtype=torch.floataa )
        else:
            lowerCamelCase_ =timesteps.to(device=lowerCAmelCase )

        # empty dt and derivative
        lowerCamelCase_ =None
        lowerCamelCase_ =None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        lowerCamelCase_ =defaultdict(lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """Invert the sigma table: interpolate a (fractional) timestep for a sigma."""
        lowerCamelCase_ =np.log(lowerCAmelCase )

        # get distribution
        lowerCamelCase_ =log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        lowerCamelCase_ =np.cumsum((dists >= 0), axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
        lowerCamelCase_ =low_idx + 1

        lowerCamelCase_ =log_sigmas[low_idx]
        lowerCamelCase_ =log_sigmas[high_idx]

        # interpolate sigmas
        lowerCamelCase_ =(low - log_sigma) / (low - high)
        lowerCamelCase_ =np.clip(lowerCAmelCase, 0, 1 )

        # transform interpolation to time range
        lowerCamelCase_ =(1 - w) * low_idx + w * high_idx
        lowerCamelCase_ =t.reshape(sigma.shape )
        return t

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """Construct the Karras et al. (2022) rho-spaced noise schedule."""
        lowerCamelCase_ =in_sigmas[-1].item()
        lowerCamelCase_ =in_sigmas[0].item()

        lowerCamelCase_ =7.0  # 7.0 is the value used in the paper

        lowerCamelCase_ =np.linspace(0, 1, lowerCAmelCase )
        lowerCamelCase_ =sigma_min ** (1 / rho)
        lowerCamelCase_ =sigma_max ** (1 / rho)
        lowerCamelCase_ =(max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def lowercase__ ( self ):
        """True while the scheduler is waiting to take the first-order half-step."""
        return self.dt is None

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = True, ):
        """One Heun step: Euler half-step first, then the averaged-slope correction."""
        lowerCamelCase_ =self.index_for_timestep(lowerCAmelCase )

        # advance index counter by 1
        lowerCamelCase_ =timestep.cpu().item() if torch.is_tensor(lowerCAmelCase ) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            lowerCamelCase_ =self.sigmas[step_index]
            lowerCamelCase_ =self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            lowerCamelCase_ =self.sigmas[step_index - 1]
            lowerCamelCase_ =self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        lowerCamelCase_ =0
        lowerCamelCase_ =sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            lowerCamelCase_ =sigma_hat if self.state_in_first_order else sigma_next
            lowerCamelCase_ =sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            lowerCamelCase_ =sigma_hat if self.state_in_first_order else sigma_next
            lowerCamelCase_ =model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            lowerCamelCase_ =model_output
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )

        if self.config.clip_sample:
            lowerCamelCase_ =pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            lowerCamelCase_ =(sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            lowerCamelCase_ =sigma_next - sigma_hat

            # store for 2nd order step
            lowerCamelCase_ =derivative
            lowerCamelCase_ =dt
            lowerCamelCase_ =sample
        else:
            # 2. 2nd order / Heun's method
            lowerCamelCase_ =(sample - pred_original_sample) / sigma_next
            lowerCamelCase_ =(self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            lowerCamelCase_ =self.dt
            lowerCamelCase_ =self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            lowerCamelCase_ =None
            lowerCamelCase_ =None
            lowerCamelCase_ =None

        lowerCamelCase_ =sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Forward-diffuse clean samples: x_t = x_0 + sigma(t) * noise."""
        lowerCamelCase_ =self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(lowerCAmelCase ):
            # mps does not support float64
            lowerCamelCase_ =self.timesteps.to(original_samples.device, dtype=torch.floataa )
            lowerCamelCase_ =timesteps.to(original_samples.device, dtype=torch.floataa )
        else:
            lowerCamelCase_ =self.timesteps.to(original_samples.device )
            lowerCamelCase_ =timesteps.to(original_samples.device )

        lowerCamelCase_ =[self.index_for_timestep(lowerCAmelCase, lowerCAmelCase ) for t in timesteps]

        lowerCamelCase_ =sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            lowerCamelCase_ =sigma.unsqueeze(-1 )

        lowerCamelCase_ =original_samples + noise * sigma
        return noisy_samples

    def __len__( self ):
        """Number of training timesteps configured for this scheduler."""
        return self.config.num_train_timesteps
676
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class __UpperCamelCase(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BertGeneration checkpoints.

    Wraps a ``spiece.model`` vocabulary file. Construct with ``vocab_file``
    pointing at the SentencePiece model; the remaining arguments set the
    special tokens forwarded to :class:`PreTrainedTokenizer`.
    """

    # Class attributes restored to the names the PreTrainedTokenizer
    # machinery looks up; the mangled source bound all of them to `lowercase`.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        """Number of pieces in the SentencePiece model (excludes added tokens)."""
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is a C++ object and is not picklable;
        # drop it here and rebuild it in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility with pickles created before sp_model_kwargs existed
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a piece (str) to its vocabulary id."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert a vocabulary id back to its piece (str)."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Join pieces back into a single string, decoding runs between special tokens."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk (e.g. loaded from memory): serialize the model proto instead.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
676
1
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler

from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
    Adafactor,
    AdamW,
    get_constant_schedule,
    get_constant_schedule_with_warmup,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available


if is_fairscale_available():
    from fairscale.optim import OSS


logger = logging.get_logger(__name__)

# Maps the --lr_scheduler CLI choice to its schedule factory.
# Restored name: the methods below look this mapping up as `arg_to_scheduler`.
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class lowerCamelCase_(Trainer):
    """Seq2seq Trainer with label smoothing, sortish sampling and generation-based eval.

    Method names are restored to the hooks the base `Trainer` dispatches on
    (`create_optimizer_and_scheduler`, `_get_train_sampler`, `compute_loss`,
    `prediction_step`); the mangled source named every method `A`, so only the
    last definition survived.
    """

    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        # FSMT keeps separate source/target vocabularies; use the target size there.
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """Set up `self.optimizer` (AdamW/Adafactor, optionally OSS-sharded) and `self.lr_scheduler`."""
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        """Build the LR schedule selected by `--lr_scheduler`."""
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self):
        """Pick a sampler: none for iterable datasets, TPU sampler on TPU, otherwise (sortish) random/distributed."""
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        """Return (loss, logits), honoring pad-token masking and label smoothing."""
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """Evaluation step: optionally generate, then compute loss; returns (loss, logits, labels)."""
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        """Right-pad `tensor` to `max_length` with the pad (or EOS) token id."""
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        # Restored: the mangled source dropped this slice-store, so the real
        # tokens were never copied into the padded tensor.
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
0
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at `x` term by term.

    `poly` holds coefficients in ascending-power order, i.e. the value is
    sum(poly[i] * x**i). O(n) terms, each with an O(i) power.
    """
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule.

    Processes coefficients from highest power down, using one multiply and
    one add per coefficient — no explicit powers.
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    # Restored names: the mangled source assigned both constants to `a_`
    # while the calls below referenced the undefined `poly` and `x`.
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # f(x) = 7x^4 + 9.3x^3 + 5x^2
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
676
0
from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build a one-row highway: a list of cells holding a car's speed, or -1 if empty.

    Cars are placed every `frequency` cells (or at random gaps of 1..2*max_speed
    when `random_frequency`), with speed `initial_speed` (or a random speed in
    0..max_speed when `random_speed`).
    """
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        # Restored: the mangled source assigned the speed to a throwaway local
        # instead of writing it into the highway cell.
        highway[0][i] = randint(0, max_speed) if random_speed else initial_speed
        i += randint(1, max_speed * 2) if random_frequency else frequency  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """Count empty cells between the car at `car_index` and the next car, wrapping around."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway: wrap to the start.
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """One Nagel–Schreckenberg speed update: accelerate, brake for the gap, random slowdown."""
    number_of_cells = len(highway_now)
    # Before calculations, the next highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """Run `number_of_update` steps, appending each new highway state and returning the history."""
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()
1
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class __UpperCamelCase(ProcessorMixin):
    """Processor pairing a CLIP image processor with an XLM-Roberta tokenizer.

    Wraps both so a single call can prepare text, images, or both for the model.
    """

    # Attribute names restored to those ProcessorMixin requires; the mangled
    # source bound all three to `lowercase`.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # `feature_extractor` is only honored as a legacy fallback.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`.

        Returns a BatchEncoding with `input_ids`/`attention_mask` when text is
        given, `pixel_values` when images are given, and both when both are.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Merge the image output into the text encoding.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, order-preserving and de-duplicated.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
676
0
import os

import pytest

from datasets import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
)


# Restored name: pytest only honors the mark when it is bound to `pytestmark`.
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    """inspect_dataset writes the loading script (and no __pycache__) into the target dir."""
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    """inspect_metric writes the metric script (and no __pycache__) into the target dir."""
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
2
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class __UpperCamelCase(Pipeline):
    """Image-to-text pipeline: predicts a caption for a given image.

    Method names restored to the `Pipeline` hooks (`_sanitize_parameters`,
    `preprocess`, `_forward`, `postprocess`); the mangled source named them
    all `lowercase__`, breaking dispatch.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        """Split user kwargs into preprocess / forward parameter dicts."""
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        """Caption the given image(s); see the class docstring for accepted input types."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        """Load the image and build model inputs, handling model-specific prompt formats."""
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                # GIT expects a leading CLS token before the prompt tokens.
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `generate`. Set them back to `None` in this case.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        """Decode each generated sequence into {"generated_text": ...} records."""
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
676
0
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    """Configuration for Perceiver models (text, image, optical flow, multimodal).

    The mangled source named both classes in this module identically, so the
    second shadowed the first; the upstream names are restored here.
    """

    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    """ONNX export configuration for Perceiver."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build dummy text or image inputs for ONNX export, renaming the model input to "inputs"."""
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
3
'''simple docstring''' import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def a_ ( __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Any ) -> str: """simple docstring""" # Initialise PyTorch model lowerCamelCase_ =BertConfig.from_json_file(__snake_case ) print(F'''Building PyTorch model from configuration: {config}''' ) lowerCamelCase_ =BertForPreTraining(__snake_case ) # Load weights from tf checkpoint load_tf_weights_in_bert(__snake_case , __snake_case , __snake_case ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , __snake_case ) if __name__ == "__main__": a_ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--bert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained BERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) a_ : Optional[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
676
0
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __UpperCamelCase : Any = 1.054_571_817e-34 # unit of ℏ : J * s __UpperCamelCase : List[Any] = 3e8 # unit of c : m * s^-1 def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ): if (force, area, distance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if force < 0: raise ValueError('Magnitude of force can not be negative' ) if distance < 0: raise ValueError('Distance can not be negative' ) if area < 0: raise ValueError('Area can not be negative' ) if force == 0: lowerCAmelCase = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: lowerCAmelCase = (240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: lowerCAmelCase = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError('One and only one argument must be 0' ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
4
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Union[str, Any] = logging.get_logger(__name__) a_ : Optional[int] = { """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""", # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[Any] ='altclip_text_model' def __init__( self, lowerCAmelCase=250_002, lowerCAmelCase=1_024, lowerCAmelCase=24, lowerCAmelCase=16, lowerCAmelCase=4_096, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=514, lowerCAmelCase=1, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-05, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase="absolute", lowerCAmelCase=True, lowerCAmelCase=768, **lowerCAmelCase, ): """simple docstring""" super().__init__(pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase ) lowerCamelCase_ =vocab_size lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =hidden_act lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =attention_probs_dropout_prob lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =type_vocab_size lowerCamelCase_ =initializer_range lowerCamelCase_ =initializer_factor lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =position_embedding_type lowerCamelCase_ =use_cache lowerCamelCase_ =project_dim class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Dict ='altclip_vision_model' def __init__( self, lowerCAmelCase=768, lowerCAmelCase=3_072, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3, lowerCAmelCase=224, lowerCAmelCase=32, lowerCAmelCase="quick_gelu", lowerCAmelCase=1e-5, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1.0, 
**lowerCAmelCase, ): """simple docstring""" super().__init__(**lowerCAmelCase ) lowerCamelCase_ =hidden_size lowerCamelCase_ =intermediate_size lowerCamelCase_ =projection_dim lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =num_channels lowerCamelCase_ =patch_size lowerCamelCase_ =image_size lowerCamelCase_ =initializer_range lowerCamelCase_ =initializer_factor lowerCamelCase_ =attention_dropout lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =hidden_act @classmethod def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" cls._set_token_in_kwargs(lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =cls.get_config_dict(lowerCAmelCase, **lowerCAmelCase ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('''model_type''' ) == "altclip": lowerCamelCase_ =config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCAmelCase, **lowerCAmelCase ) class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Dict ='altclip' lowercase : str =True def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=768, lowerCAmelCase=2.6_5_9_2, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =kwargs.pop('''text_config_dict''', lowerCAmelCase ) lowerCamelCase_ =kwargs.pop('''vision_config_dict''', lowerCAmelCase ) super().__init__(**lowerCAmelCase ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. 
The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: lowerCamelCase_ ={} # This is the complete result when using `text_config_dict`. lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: lowerCamelCase_ =( f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. ''' f'''The value `text_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: lowerCamelCase_ =( f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ''' f'''value `text_config["{key}"]` will be overriden.''' ) logger.warning(lowerCAmelCase ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: lowerCamelCase_ ={} # This is the complete result when using `vision_config_dict`. lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: lowerCamelCase_ ={ str(lowerCAmelCase ): value for key, value in _vision_config_dict['''id2label'''].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. 
for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: lowerCamelCase_ =( f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different ''' f'''values. The value `vision_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: lowerCamelCase_ =( f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ''' f'''The value `vision_config["{key}"]` will be overriden.''' ) logger.warning(lowerCAmelCase ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: lowerCamelCase_ ={} logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' ) if vision_config is None: lowerCamelCase_ ={} logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' ) lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase ) lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase ) lowerCamelCase_ =projection_dim lowerCamelCase_ =logit_scale_init_value lowerCamelCase_ =1.0 @classmethod def lowercase__ ( cls, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =copy.deepcopy(self.__dict__ ) lowerCamelCase_ =self.text_config.to_dict() lowerCamelCase_ =self.vision_config.to_dict() lowerCamelCase_ =self.__class__.model_type return output
676
0
'''simple docstring''' def A (__lowerCamelCase :int , __lowerCamelCase :int ): if b == 0: return 1 if (b % 2) == 0: return actual_power(__lowerCamelCase , int(b / 2 ) ) * actual_power(__lowerCamelCase , int(b / 2 ) ) else: return a * actual_power(__lowerCamelCase , int(b / 2 ) ) * actual_power(__lowerCamelCase , int(b / 2 ) ) def A (__lowerCamelCase :int , __lowerCamelCase :int ): if b < 0: return 1 / actual_power(__lowerCamelCase , __lowerCamelCase ) return actual_power(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": print(power(-2, -3))
5
'''simple docstring''' import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=2, lowerCAmelCase=99, lowerCAmelCase=0, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase="last", lowerCAmelCase=None, lowerCAmelCase=None, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =seq_length lowerCamelCase_ =is_training lowerCamelCase_ =use_input_lengths lowerCamelCase_ =use_token_type_ids lowerCamelCase_ =use_labels lowerCamelCase_ =gelu_activation lowerCamelCase_ =sinusoidal_embeddings lowerCamelCase_ =causal lowerCamelCase_ =asm lowerCamelCase_ =n_langs lowerCamelCase_ =vocab_size lowerCamelCase_ =n_special lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ 
=attention_probs_dropout_prob lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =type_vocab_size lowerCamelCase_ =type_sequence_label_size lowerCamelCase_ =initializer_range lowerCamelCase_ =num_labels lowerCamelCase_ =num_choices lowerCamelCase_ =summary_type lowerCamelCase_ =use_proj lowerCamelCase_ =scope def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ =None if self.use_input_lengths: lowerCamelCase_ =( ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCamelCase_ =None if self.use_token_type_ids: lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.n_langs ) lowerCamelCase_ =None lowerCamelCase_ =None lowerCamelCase_ =None if self.use_labels: lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size ) lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels ) lowerCamelCase_ =ids_tensor([self.batch_size], 2 ).float() lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices ) lowerCamelCase_ =self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowercase__ ( self ): """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, ) def lowercase__ ( self, 
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, lengths=lowerCAmelCase, langs=lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, langs=lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertWithLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForQuestionAnswering(lowerCAmelCase ) model.to(lowerCAmelCase ) 
model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model( lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, p_mask=lowerCAmelCase, ) lowerCamelCase_ =model( lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, ) ((lowerCamelCase_), ) =result_with_labels.to_tuple() lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase ) ((lowerCamelCase_), ) =result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, () ) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =self.num_labels lowerCamelCase_ 
=FlaubertForTokenClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =self.num_choices lowerCamelCase_ =FlaubertForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =model( lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ) =config_and_inputs lowerCamelCase_ ={ '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : List[Any] =( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) lowercase : Tuple =( { 'feature-extraction': 
FlaubertModel, 'fill-mask': FlaubertWithLMHeadModel, 'question-answering': FlaubertForQuestionAnsweringSimple, 'text-classification': FlaubertForSequenceClassification, 'token-classification': FlaubertForTokenClassification, 'zero-shot': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ): """simple docstring""" lowerCamelCase_ =super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowerCamelCase_ =torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase ) lowerCamelCase_ =torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase ) return inputs_dict def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =FlaubertModelTester(self ) lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, emb_dim=37 ) def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase ) def lowercase__ ( self 
): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =FlaubertModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @slow @require_torch_gpu def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return lowerCamelCase_ =True lowerCamelCase_ =model_class(config=lowerCAmelCase ) lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =torch.jit.trace( lowerCAmelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, '''traced_model.pt''' ) ) lowerCamelCase_ =torch.jit.load(os.path.join(lowerCAmelCase, '''traced_model.pt''' ), map_location=lowerCAmelCase ) loaded(inputs_dict['''input_ids'''].to(lowerCAmelCase ), inputs_dict['''attention_mask'''].to(lowerCAmelCase ) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' ) lowerCamelCase_ =torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase )[0] lowerCamelCase_ =torch.Size((1, 11, 768) ) self.assertEqual(output.shape, lowerCAmelCase ) lowerCamelCase_ =torch.tensor( [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=1e-4 ) )
676
0
# BigBird "fast" tokenizer module (SentencePiece vocab + tokenizers.json).
# NOTE(review): machine-mangled.  The six module constants below all share the single
# name `_lowerCamelCase`, so each binding overwrites the previous one; the class then
# references VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, which are never defined in this file as
# written.  The base class `UpperCamelCase__` is likewise undefined (presumably
# PreTrainedTokenizerFast, imported below — confirm upstream).
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # sentencepiece not installed: no slow-tokenizer fallback available
    _lowerCamelCase = None

_lowerCamelCase = logging.get_logger(__name__)

# vocabulary / tokenizer file names
_lowerCamelCase = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

# checkpoint name -> hosted file URL
_lowerCamelCase = {
    'vocab_file': {
        'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
        'google/bigbird-roberta-large': (
            'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
        ),
        'google/bigbird-base-trivia-itc': (
            'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
        ),
    },
    'tokenizer_file': {
        'google/bigbird-roberta-base': (
            'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
        ),
        'google/bigbird-roberta-large': (
            'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
        ),
        'google/bigbird-base-trivia-itc': (
            'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
        ),
    },
}

# maximum sequence length per checkpoint
_lowerCamelCase = {
    'google/bigbird-roberta-base': 4096,
    'google/bigbird-roberta-large': 4096,
    'google/bigbird-base-trivia-itc': 4096,
}

# SentencePiece word-start marker
_lowerCamelCase = '▁'


class UpperCamelCase_ ( UpperCamelCase__ ):
    # Class-level tokenizer configuration (these names were lost to the mangling —
    # see module NOTE above).
    lowerCamelCase_ = VOCAB_FILES_NAMES
    lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
    lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCamelCase_ = BigBirdTokenizer
    lowerCamelCase_ = ["input_ids", "attention_mask"]
    lowerCamelCase_ = []

    # NOTE(review): the duplicate parameter name `__A` below is a SyntaxError in real
    # Python; the original had distinct vocab_file/tokenizer_file/unk/bos/... params.
    # All nine assignment targets in the body were collapsed to one name and
    # `super().__init__` forwards `__A` for every keyword, so the original special
    # token wiring is lost.
    def __init__( self :str , __A :Tuple=None , __A :Optional[Any]=None , __A :Any="<unk>" , __A :Any="<s>" , __A :Union[str, Any]="</s>" , __A :List[str]="<pad>" , __A :Optional[Any]="[SEP]" , __A :str="[MASK]" , __A :Optional[Any]="[CLS]" , **__A :Union[str, Any] , ) -> List[Any]:
        """simple docstring"""
        # Wrap plain-string special tokens in AddedToken (lstrip/rstrip behavior).
        SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
        SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
        SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
        SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
        SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
        SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        SCREAMING_SNAKE_CASE__ = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
        super().__init__(
            __A , tokenizer_file=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , **__A , )
        SCREAMING_SNAKE_CASE__ = vocab_file
        # Slow-tokenizer export only works when the sentencepiece model file exists.
        SCREAMING_SNAKE_CASE__ = False if not self.vocab_file else True

    # Build `[CLS] A [SEP]` / `[CLS] A [SEP] B [SEP]` model input sequences.
    # (All methods below were collapsed to the same name `_snake_case`.)
    def _snake_case ( self :List[Any] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
        SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    # 1 for positions holding special tokens, 0 for sequence tokens.
    def _snake_case ( self :Optional[int] , __A :List[int] , __A :Optional[List[int]] = None , __A :bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is None:
            return [1] + ([0] * len(__A )) + [1]
        return [1] + ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1]

    # Token-type ids: 0s for sentence A (plus specials), 1s for sentence B.
    def _snake_case ( self :Optional[Any] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
        SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    # Copy the sentencepiece model next to the saved tokenizer files.
    def _snake_case ( self :int , __A :str , __A :Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(__A ):
            # Not a directory: log and bail out (returns None rather than a tuple).
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        SCREAMING_SNAKE_CASE__ = os.path.join(
            __A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
            copyfile(self.vocab_file , __A )
        return (out_vocab_file,)
6
"""PyTorch <-> Flax checkpoint conversion utilities.

Rewritten from a machine-mangled original in which:
  * every function was named ``a_`` (each shadowing the last, and the module logger),
  * parameters shared the duplicate name ``__snake_case`` (a SyntaxError),
  * lambda parameters were renamed while their bodies still used the old names
    (``x`` / ``params`` -> NameError),
  * dtype attributes were corrupted (``jnp.bfloataa`` / ``np.floataa``).

Names below are restored from the names the surviving code itself references
(``logger``, ``is_sharded``, ``pt_path`` in f-strings, and the call sites
``convert_pytorch_state_dict_to_flax`` etc.).
"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict

import transformers

from .utils import logging


# Bound to the name the functions below actually use (the original bound it to `a_`).
logger = logging.get_logger(__name__)


def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load a PyTorch checkpoint (single file or sharded) into a Flax state dict.

    Args:
        flax_model: target Flax model, used as the naming/shape reference.
        pytorch_checkpoint_path: path to the checkpoint file, or the list of
            shard files when ``is_sharded`` is true.
        is_sharded: whether the checkpoint is split across several shard files.
        allow_missing_keys: unused here; kept for interface compatibility
            (parameter name presumed — confirm against callers).

    Returns:
        The converted, unflattened Flax state dict.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.'
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f'Loading PyTorch weights from {pt_path}')
        pt_state_dict = torch.load(pt_path, map_location='cpu')
        logger.info(f'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.')
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict


def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename a PyTorch weight key to its Flax counterpart, reshaping if needed."""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        # True if `key`, with or without the base-model prefix, exists in the target.
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if pt_tuple_key[-1] in ['weight', 'gamma'] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('mean',)
    if pt_tuple_key[-1] == 'running_mean' and not is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('var',)
    if pt_tuple_key[-1] == 'running_var' and not is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
    if pt_tuple_key[-1] == 'weight' and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PT (out, in, kh, kw) -> Flax (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == 'weight' and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: PT stores (out, in), Flax stores (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == 'weight' and not is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == 'gamma':
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == 'beta':
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ('parametrizations', 'original0'):
        name = pt_tuple_key[-2] + '_g'
    elif pt_tuple_key[-3::2] == ('parametrizations', 'original1'):
        name = pt_tuple_key[-2] + '_v'
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    """Convert a (non-sharded) PyTorch state dict to a Flax parameter tree."""
    # convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    model_prefix = flax_model.base_model_prefix

    # use the params sub-dict if the model contains batch norm layers
    if 'params' in flax_model.params:
        flax_model_params = flax_model.params['params']
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if 'batch_stats' in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params['batch_stats'])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split('.')[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split('.')[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split('.'))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                    f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.'
                )

        # add batch stats if the model contains batchnorm layers; batch-norm models
        # keep their weights under the params / batch_stats sub-trees
        if 'batch_stats' in flax_model.params:
            if 'mean' in flax_key[-1] or 'var' in flax_key[-1]:
                flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if 'num_batches_tracked' in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue
            # also add unexpected weight so that warning is thrown
            flax_state_dict[('params',) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)


def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    """Convert a sharded PyTorch checkpoint (list of shard files) to a Flax tree."""
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load one shard at a time to bound memory usage
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if 'batch_stats' in flax_model.params:
            flax_model_params = flax_model.params['params']
            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats']))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split('.')[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split('.')[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split('.'))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )

            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
                        f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.'
                    )

            # add batch stats if the model contains batchnorm layers
            if 'batch_stats' in flax_model.params:
                if 'mean' in flax_key[-1]:
                    flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if 'var' in flax_key[-1]:
                    flax_state_dict[('batch_stats',) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if 'num_batches_tracked' in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue
                # also add unexpected weight so that warning is thrown
                flax_state_dict[('params',) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)


def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load a serialized Flax checkpoint file into a PyTorch model."""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f'Loading Flax weights from {flax_checkpoint_path}')

    # import correct flax class (FlaxBertModel for BertModel, etc.)
    flax_cls = getattr(transformers, 'Flax' + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, 'rb') as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ')

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load a Flax parameter tree into a PyTorch model and return the model.

    Logs (rather than raises on) unexpected and missing keys, mirroring
    ``from_pretrained`` semantics; raises ValueError on shape mismatches.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.'
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot
        # handle bf16 and bf16 is not fully supported in PT yet.
        logger.warning(
            'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
            'before loading those in PyTorch model.'
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split('.')[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split('.')[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    # We also need to look at `pt_model_dict` and see if there are keys requiring
    # further transformation (new `weight_norm` from
    # https://github.com/huggingface/transformers/pull/24030).  This mapping only
    # depends on the PT key *names*, which never change below, so build it once
    # instead of once per weight.
    special_pt_names = {}
    for key in pt_model_dict:
        key_components = key.split('.')
        name = None
        if key_components[-3::2] == ['parametrizations', 'original0']:
            name = key_components[-2] + '_g'
        elif key_components[-3::2] == ['parametrizations', 'original1']:
            name = key_components[-2] + '_v'
        if name is not None:
            key_components = key_components[:-3] + [name]
            key_to_check = '.'.join(key_components)
            special_pt_names[key_to_check] = key

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == 'kernel' and flax_tensor.ndim == 4 and '.'.join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == 'kernel' and '.'.join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ['scale', 'embedding']:
            flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        # adding batch stats from flax batch norm to pt
        elif 'mean' in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ('running_mean',)
        elif 'var' in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ('running_var',)

        if 'batch_stats' in flax_state:
            flax_key = '.'.join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = '.'.join(flax_key_tuple)

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
                    f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.'
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            'Some weights of the Flax model were not used when initializing the PyTorch model'
            f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
            f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
            ' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
            f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
            ' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
            ' FlaxBertForSequenceClassification model).'
        )
    else:
        logger.warning(f'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n')

    if len(missing_keys) > 0:
        logger.warning(
            f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
            f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
            ' use it for predictions and inference.'
        )
    else:
        logger.warning(
            f'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
            'If your task is similar to the task the model of the checkpoint was trained on, '
            f'you can already use {pt_model.__class__.__name__} for predictions without further training.'
        )

    return pt_model


# Backward-compatible alias: in the mangled original, the public name `a_` ended up
# bound to the last definition in the module, i.e. this function.
a_ = load_flax_weights_in_pytorch_model
676
0
"""simple docstring"""
# NLLB-200 slow (SentencePiece) tokenizer module.
# NOTE(review): machine-mangled.  Every local/attribute assignment target has been
# collapsed to `_A` (so e.g. `self.sp_model = ...` and `self.__dict__ = d` are lost),
# the module constants below all share the single name `a` (each binding overwrites
# the previous, so VOCAB_FILES_NAMES / PRETRAINED_* referenced by the class are never
# defined), every method is named `lowerCAmelCase_` (later defs clobber earlier ones,
# and `@src_lang.setter` references an undefined name), and the base class
# `__lowerCAmelCase` is undefined (presumably PreTrainedTokenizer, imported below).
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


a = logging.get_logger(__name__)

# SentencePiece word-start marker
a = '''▁'''

# vocabulary file name
a = {'''vocab_file''': '''sentencepiece.bpe.model'''}

# checkpoint name -> hosted file URL
a = {
    '''vocab_file''': {
        '''facebook/nllb-200-distilled-600M''': (
            '''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
        ),
    }
}

# maximum sequence length per checkpoint
a = {
    '''facebook/nllb-200-distilled-600M''': 1_024,
}

# fmt: off
# FLORES-200 language codes supported by NLLB.
a = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']


class lowercase_ ( __lowerCAmelCase ):
    '''simple docstring'''

    # Class-level tokenizer configuration (names reference the mangled-away constants).
    UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES
    UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase : Optional[Any] = ['''input_ids''', '''attention_mask''']
    UpperCAmelCase : List[int] = []
    UpperCAmelCase : List[int] = []

    def __init__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int="<s>" , _UpperCAmelCase : Union[str, Any]="</s>" , _UpperCAmelCase : Any="</s>" , _UpperCAmelCase : str="<s>" , _UpperCAmelCase : int="<unk>" , _UpperCAmelCase : Optional[Any]="<pad>" , _UpperCAmelCase : Dict="<mask>" , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Optional[Dict[str, Any]] = None , _UpperCAmelCase : Any=None , _UpperCAmelCase : Tuple=False , **_UpperCAmelCase : Tuple , ):
        # NOTE(review): duplicate `_UpperCAmelCase` parameters are a SyntaxError; the
        # original had distinct vocab_file/bos/eos/sep/cls/unk/pad/mask/... params,
        # and `super().__init__` forwarded each distinctly — that wiring is lost.
        # Mask token behave like a normal word, i.e. include the space before it
        _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
        _A = {} if sp_model_kwargs is None else sp_model_kwargs
        _A = legacy_behaviour
        super().__init__(
            bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_UpperCAmelCase , **_UpperCAmelCase , )
        # NOTE(review): the next binding was presumably `self.sp_model = ...`; as
        # written, `self.sp_model` below is never assigned.
        _A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(_UpperCAmelCase ) )
        _A = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4   |  5   |  6   |   7  |  8   |  9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm      | '<unk>' | '<s>'   | '</s>' | 'an'    | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        _A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        _A = 1

        _A = len(self.sp_model )
        # language code -> id, appended after the spm vocabulary
        _A = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCAmelCase )
        }
        _A = {v: k for k, v in self.lang_code_to_id.items()}
        _A = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        _A = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        _A = list(self.lang_code_to_id.keys() )

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )

        _A = src_lang if src_lang is not None else 'eng_Latn'
        _A = self.lang_code_to_id[self._src_lang]
        _A = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )

    # Pickling support: drop the (unpicklable) SentencePiece processor and keep its
    # serialized proto.  NOTE(review): `return state` references a name that was
    # mangled away (`state = self.__dict__.copy()`).
    def __getstate__( self : List[Any] ):
        _A = self.__dict__.copy()
        _A = None
        _A = self.sp_model.serialized_model_proto()
        return state

    # Unpickling: rebuild the SentencePiece processor from the serialized proto.
    # NOTE(review): `_A = d` was presumably `self.__dict__ = d` — lost in mangling.
    def __setstate__( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] ):
        _A = d

        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            _A = {}

        _A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    # vocab_size property (spm vocab + language codes + fairseq offset + mask token)
    @property
    def lowerCAmelCase_ ( self : Optional[int] ):
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token

    # src_lang property + setter (NOTE(review): `@src_lang.setter` is a NameError as
    # written — the getter above is named `lowerCAmelCase_`, not `src_lang`).
    @property
    def lowerCAmelCase_ ( self : Union[str, Any] ):
        return self._src_lang

    @src_lang.setter
    def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : str ):
        _A = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )

    # 1 for special tokens (prefix/suffix), 0 for sequence tokens.
    def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )

        _A = [1] * len(self.prefix_tokens )
        _A = [1] * len(self.suffix_tokens )
        if token_ids_a is None:
            return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
        return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones

    # Add the language-code prefix/suffix tokens around the sequence(s).
    def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens

    # Token-type ids (all zeros for NLLB).
    def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ):
        _A = [self.sep_token_id]
        _A = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    # Build model inputs for translation, tagging them with the target-language id.
    def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] , _UpperCAmelCase : Optional[str] , **_UpperCAmelCase : Union[str, Any] ):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        _A = src_lang
        _A = self(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
        _A = self.convert_tokens_to_ids(_UpperCAmelCase )
        _A = tgt_lang_id
        return inputs

    # token -> id mapping for the full vocabulary (incl. added tokens).
    def lowerCAmelCase_ ( self : List[Any] ):
        _A = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    # SentencePiece tokenization.
    def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : str ):
        return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )

    # token -> id, honoring the fairseq special-token alignment.
    def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        _A = self.sp_model.PieceToId(_UpperCAmelCase )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    # id -> token, honoring the fairseq special-token alignment.
    def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : List[Any] ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    # Tokens -> string.  NOTE(review): the second `_UpperCAmelCase` here is the
    # tokens argument itself; the original replaced the SPIECE_UNDERLINE marker
    # ('▁', bound above to the mangled constant `a`) with a space — as written this
    # raises TypeError (str.replace with a list).
    def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Dict ):
        _A = ''.join(_UpperCAmelCase ).replace(_UpperCAmelCase , ' ' ).strip()
        return out_string

    # Copy/serialize the sentencepiece model into the save directory.
    def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ):
        if not os.path.isdir(_UpperCAmelCase ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _A = os.path.join(
            _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _UpperCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(_UpperCAmelCase , 'wb' ) as fi:
                _A = self.sp_model.serialized_model_proto()
                fi.write(_UpperCAmelCase )

        return (out_vocab_file,)

    # Seq2seq batch preparation with default eng_Latn -> fra_Latn direction.
    def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str = "eng_Latn" , _UpperCAmelCase : Optional[List[str]] = None , _UpperCAmelCase : str = "fra_Latn" , **_UpperCAmelCase : str , ):
        _A = src_lang
        _A = tgt_lang
        return super().prepare_seqaseq_batch(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )

    # Switch to source-language special tokens (input mode).
    def lowerCAmelCase_ ( self : Optional[Any] ):
        return self.set_src_lang_special_tokens(self.src_lang )

    # Switch to target-language special tokens (target mode).
    def lowerCAmelCase_ ( self : int ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )

    # Reset prefix/suffix tokens for the source language.  In legacy behaviour the
    # language code goes in the suffix; otherwise it is the (single) prefix token.
    def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Tuple ):
        _A = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            _A = []
            _A = [self.eos_token_id, self.cur_lang_code]
        else:
            _A = [self.cur_lang_code]
            _A = [self.eos_token_id]

    # Reset prefix/suffix tokens for the target language (same scheme as above).
    def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : str ):
        _A = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            _A = []
            _A = [self.eos_token_id, self.cur_lang_code]
        else:
            _A = [self.cur_lang_code]
            _A = [self.eos_token_id]
7
'''simple docstring''' def a_ ( __snake_case : str , __snake_case : str ) -> str: """simple docstring""" lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =( first_str_length if first_str_length > second_str_length else second_str_length ) lowerCamelCase_ =[] for char_count in range(__snake_case ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(__snake_case ) if __name__ == "__main__": print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
676
0
"""Tests for the MaskFormerSwin backbone model.

NOTE(review): the obfuscated source defined every class as ``SCREAMING_SNAKE_CASE``
and every method under one shared name, so the classes effectively kept only their
last method.  Class/method names below were restored from unittest conventions and
the surviving call sites; names marked with a NOTE are best-effort reconstructions.
"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple

from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MaskFormerSwinBackbone
    from transformers.models.maskformer import MaskFormerSwinModel


class MaskFormerSwinModelTester:
    """Builds small configs and dummy inputs for the MaskFormerSwin tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return a config plus random pixel values (and labels if enabled)."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        # NOTE(review): "path_norm" mirrors the kwarg spelling in the original
        # source — confirm against MaskFormerSwinConfig before renaming.
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError when asking for an unsupported stage
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    # NOTE(review): the five boolean flags below were all obfuscated to one name;
    # these are the conventional ModelTesterMixin switches — verify the set.
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Intentionally a no-op: the common-properties check does not apply here.
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    # NOTE(review): the original method name behind this skip is unrecoverable
    # from the obfuscated source; a unique plausible name restores correctness.
    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            # NaN != NaN, so this zeroes out exactly the NaN entries.
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})


@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overriding because MaskFormerSwin returns hidden states as tuples of
    # tensors instead of single tensors.
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
8
"""Lazy import structure for the TimmBackbone model."""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public names, consumed by _LazyModule below.
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only the configuration.
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]


if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
0
def sum_of_digits(n: int) -> int:
    """Return the sum of the decimal digits of ``n`` (the sign is ignored).

    >>> sum_of_digits(12345)
    15
    >>> sum_of_digits(-123)
    6
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive variant of :func:`sum_of_digits`.

    >>> sum_of_digits_recursion(12345)
    15
    """
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """String-conversion variant of :func:`sum_of_digits`.

    >>> sum_of_digits_compact(12345)
    15
    """
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time the three digit-sum implementations against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # Build a call expression that timeit evaluates in __main__'s namespace.
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
9
"""Levenshtein (edit) distance via memoised recursion."""
import functools


def a_(worda: str, wordb: str) -> int:
    """Return the minimum number of single-character insertions, deletions and
    substitutions needed to transform ``worda`` into ``wordb``.

    >>> a_("intention", "execution")
    5
    >>> a_("", "abc")
    3
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index overflows - delete all remaining from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index overflows - delete all remaining from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),  # delete from worda
            1 + min_distance(indexa, indexb + 1),  # insert into worda
            diff + min_distance(indexa + 1, indexb + 1),  # substitute (or match)
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0
"""Tests for the CPM-Ant tokenizer.

NOTE(review): the obfuscated source collapsed the class name, both method names
and the local variables (`_A`) — the names below are reconstructions; the test
data and assertions are preserved verbatim.
"""
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Minimal vocabulary covering the Chinese and ASCII tokens used below.
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 1_4962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)
        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
10
"""Check whether a number's digits reappear as the trailing digits of its square
(automorphic-style digit check)."""


def a_(number: int) -> bool:
    """Return True if each decimal digit of ``number`` equals the corresponding
    low-order digit of ``number ** 2``.

    >>> a_(5)
    True
    >>> a_(7)
    False
    >>> a_(25)
    True

    :raises TypeError: if ``number`` is not an ``int``.
    Negative numbers always return ``False``.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        # Compare the lowest remaining digit of the number and of its square.
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0
"""Lazy import structure for the BLIP model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# Map of submodule name -> public names, consumed by _LazyModule below.
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]


if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
11
"""Sudoku solver using plain backtracking.

A grid is a 9x9 list of lists of ints where 0 marks an empty cell.
"""
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if ``n`` can be placed at (row, column) without clashing
    with the same row, column or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            # (row - row % 3, column - column % 3) is the top-left of the box.
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return coordinates of the first empty (0) cell, or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place via backtracking.

    Returns the (mutated) grid on success, or None if no solution exists.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # backtrack: undo the tentative placement

    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid row by row, cells separated by spaces."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
676
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _snake_case ( unittest.TestCase ): def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=2_24 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5] , ): '''simple docstring''' lowercase__ : List[str] = size if size is not None else {"""height""": 18, """width""": 18} lowercase__ : int = parent lowercase__ : Union[str, Any] = batch_size lowercase__ : List[str] = num_channels lowercase__ : str = image_size lowercase__ : int = min_resolution lowercase__ : Dict = max_resolution lowercase__ : Tuple = do_resize lowercase__ : Union[str, Any] = size lowercase__ : Any = do_normalize lowercase__ : Tuple = image_mean lowercase__ : str = image_std def lowercase__ ( self): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class _snake_case ( UpperCAmelCase_ , unittest.TestCase ): __lowerCAmelCase : Optional[Any] = ViTImageProcessor if is_vision_available() else None def lowercase__ ( self): '''simple docstring''' lowercase__ : str = EfficientFormerImageProcessorTester(self) @property def lowercase__ ( self): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def lowercase__ ( self): '''simple docstring''' lowercase__ : Any = 
self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_mean""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_std""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_normalize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_resize""")) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """size""")) def lowercase__ ( self): '''simple docstring''' pass def lowercase__ ( self): '''simple docstring''' lowercase__ : str = self.image_processing_class(**self.image_processor_dict) # create random PIL images lowercase__ : List[Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image) # Test not batched input lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : str = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors lowercase__ : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray) # Test not batched input lowercase__ : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, 
self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : Dict = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowercase__ ( self): '''simple docstring''' lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors lowercase__ : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor) # Test not batched input lowercase__ : int = image_processor(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched lowercase__ : Any = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
12
"""Informer model configuration."""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """
    Configuration class for an Informer time-series forecasting model.

    NOTE(review): the dumped source declared every ``__init__`` parameter with
    the same mangled name (a SyntaxError); names below are reconstructed from
    the body's assignment order and the original default-value sequence.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: Optional[List[int]] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time-series specific configuration
        self.prediction_length = prediction_length
        # context window defaults to the prediction horizon when unset
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # heuristic: half the category count, capped at 50
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer-specific sparse-attention settings
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra input features concatenated to the lagged values."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
676
0
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys A__ : List[str] = """3""" print("""Python version:""", sys.version) print("""OS platform:""", platform.platform()) print("""OS architecture:""", platform.machine()) try: import torch print("""Torch version:""", torch.__version__) print("""Cuda available:""", torch.cuda.is_available()) print("""Cuda version:""", torch.version.cuda) print("""CuDNN version:""", torch.backends.cudnn.version()) print("""Number of GPUs available:""", torch.cuda.device_count()) except ImportError: print("""Torch version:""", None) try: import transformers print("""transformers version:""", transformers.__version__) except ImportError: print("""transformers version:""", None)
13
'''simple docstring''' from __future__ import annotations def a_ ( __snake_case : int ) -> list[int]: """simple docstring""" lowerCamelCase_ =[True] * limit lowerCamelCase_ =False lowerCamelCase_ =False lowerCamelCase_ =True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): lowerCamelCase_ =i * 2 while index < limit: lowerCamelCase_ =False lowerCamelCase_ =index + i lowerCamelCase_ =[2] for i in range(3 , __snake_case , 2 ): if is_prime[i]: primes.append(__snake_case ) return primes def a_ ( __snake_case : int = 100_0000 ) -> int: """simple docstring""" lowerCamelCase_ =prime_sieve(__snake_case ) lowerCamelCase_ =0 lowerCamelCase_ =0 for i in range(len(__snake_case ) ): for j in range(i + length , len(__snake_case ) ): lowerCamelCase_ =sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: lowerCamelCase_ =j - i lowerCamelCase_ =sol return largest if __name__ == "__main__": print(F"""{solution() = }""")
676
0
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ) -> List[Any]: _a : int = 0 def __lowercase ( self ) -> List[str]: _a : Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(_a , _a ) def __lowercase ( self ) -> Union[str, Any]: with tempfile.TemporaryDirectory() as tmpdirname: _a : Tuple = Path(_a ) / '''preprocessor_config.json''' _a : Optional[Any] = Path(_a ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) ) _a : List[str] = AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) def __lowercase ( self ) -> Optional[Any]: # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: _a : Optional[int] = Path(_a ) / '''preprocessor_config.json''' _a : Any = Path(_a ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) ) _a : Optional[Any] = AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) def __lowercase ( self ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmpdirname: _a : Dict = CLIPConfig() # Create a 
dummy config file with image_proceesor_type _a : Tuple = Path(_a ) / '''preprocessor_config.json''' _a : List[str] = Path(_a ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally _a : Tuple = AutoImageProcessor.from_pretrained(_a ).to_dict() config_dict.pop('''image_processor_type''' ) _a : Tuple = CLIPImageProcessor(**_a ) # save in new folder model_config.save_pretrained(_a ) config.save_pretrained(_a ) _a : List[str] = AutoImageProcessor.from_pretrained(_a ) # make sure private variable is not incorrectly saved _a : Optional[int] = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(_a , _a ) def __lowercase ( self ) -> Dict: with tempfile.TemporaryDirectory() as tmpdirname: _a : Optional[int] = Path(_a ) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) _a : List[str] = AutoImageProcessor.from_pretrained(_a ) self.assertIsInstance(_a , _a ) def __lowercase ( self ) -> Any: with self.assertRaisesRegex( _a , '''clip-base is not a local folder and is not a valid model identifier''' ): _a : Dict = AutoImageProcessor.from_pretrained('''clip-base''' ) def __lowercase ( self ) -> List[Any]: with self.assertRaisesRegex( _a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): _a : List[str] = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' ) def __lowercase ( self ) -> Dict: with self.assertRaisesRegex( _a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): _a : Optional[int] = 
AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def __lowercase ( self ) -> Union[str, Any]: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(_a ): _a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_a ): _a : Optional[Any] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a ) _a : Union[str, Any] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_a ) _a : Optional[Any] = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a ) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' ) def __lowercase ( self ) -> Dict: try: AutoConfig.register('''custom''' , _a ) AutoImageProcessor.register(_a , _a ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_a ): AutoImageProcessor.register(_a , _a ) with tempfile.TemporaryDirectory() as tmpdirname: _a : int = Path(_a ) / '''preprocessor_config.json''' _a : int = Path(_a ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) ) _a : int = CustomImageProcessor.from_pretrained(_a ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(_a ) _a : Optional[Any] = AutoImageProcessor.from_pretrained(_a ) 
self.assertIsInstance(_a , _a ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def __lowercase ( self ) -> Union[str, Any]: class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Tuple = True try: AutoConfig.register('''custom''' , _a ) AutoImageProcessor.register(_a , _a ) # If remote code is not set, the default is to use local _a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. _a : int = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub _a : Dict = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(not hasattr(_a , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
14
"""Unconditional image-generation pipeline driven by a DDIM scheduler."""
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    """
    Pipeline for unconditional image generation with denoising diffusion
    implicit models (DDIM).

    NOTE(review): the dumped source declared duplicate parameter names
    (a SyntaxError) and an undefined base class; signature reconstructed
    from the surviving default-value sequence.
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """
        Run the denoising loop and return generated images.

        Returns an ``ImagePipelineOutput`` (or a 1-tuple when
        ``return_dict`` is False).
        """
        # sample_size may be an int (square) or an explicit (h, w) pair
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        # map from [-1, 1] model space to [0, 1] image space, NCHW -> NHWC
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
676
0
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    """
    Unconditional image generation with a score-based model and the
    variance-exploding (VE) SDE predictor/corrector sampler.

    NOTE(review): the dumped source unpacked both halves of the scheduler
    output into the same mangled name and then read the undefined
    ``sample_mean`` / ``model`` — names restored below.
    """

    # score model and its matching VE-SDE scheduler
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run predictor/corrector sampling and return generated images."""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # start from pure noise scaled to the initial (largest) sigma
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # final output is the noise-free mean of the last step, clamped to [0, 1]
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
15
'''simple docstring''' from maths.prime_check import is_prime def a_ ( __snake_case : int ) -> int: """simple docstring""" if not isinstance(__snake_case , __snake_case ): lowerCamelCase_ =F'''Input value of [number={number}] must be an integer''' raise TypeError(__snake_case ) if is_prime(__snake_case ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
676
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool __A : Tuple = { 'Acehnese Arabic': 'ace_Arab', 'Acehnese Latin': 'ace_Latn', 'Mesopotamian Arabic': 'acm_Arab', 'Ta\'izzi-Adeni Arabic': 'acq_Arab', 'Tunisian Arabic': 'aeb_Arab', 'Afrikaans': 'afr_Latn', 'South Levantine Arabic': 'ajp_Arab', 'Akan': 'aka_Latn', 'Amharic': 'amh_Ethi', 'North Levantine Arabic': 'apc_Arab', 'Modern Standard Arabic': 'arb_Arab', 'Modern Standard Arabic Romanized': 'arb_Latn', 'Najdi Arabic': 'ars_Arab', 'Moroccan Arabic': 'ary_Arab', 'Egyptian Arabic': 'arz_Arab', 'Assamese': 'asm_Beng', 'Asturian': 'ast_Latn', 'Awadhi': 'awa_Deva', 'Central Aymara': 'ayr_Latn', 'South Azerbaijani': 'azb_Arab', 'North Azerbaijani': 'azj_Latn', 'Bashkir': 'bak_Cyrl', 'Bambara': 'bam_Latn', 'Balinese': 'ban_Latn', 'Belarusian': 'bel_Cyrl', 'Bemba': 'bem_Latn', 'Bengali': 'ben_Beng', 'Bhojpuri': 'bho_Deva', 'Banjar Arabic': 'bjn_Arab', 'Banjar Latin': 'bjn_Latn', 'Standard Tibetan': 'bod_Tibt', 'Bosnian': 'bos_Latn', 'Buginese': 'bug_Latn', 'Bulgarian': 'bul_Cyrl', 'Catalan': 'cat_Latn', 'Cebuano': 'ceb_Latn', 'Czech': 'ces_Latn', 'Chokwe': 'cjk_Latn', 'Central Kurdish': 'ckb_Arab', 'Crimean Tatar': 'crh_Latn', 'Welsh': 'cym_Latn', 'Danish': 'dan_Latn', 'German': 'deu_Latn', 'Southwestern Dinka': 'dik_Latn', 'Dyula': 'dyu_Latn', 'Dzongkha': 'dzo_Tibt', 'Greek': 
'ell_Grek', 'English': 'eng_Latn', 'Esperanto': 'epo_Latn', 'Estonian': 'est_Latn', 'Basque': 'eus_Latn', 'Ewe': 'ewe_Latn', 'Faroese': 'fao_Latn', 'Fijian': 'fij_Latn', 'Finnish': 'fin_Latn', 'Fon': 'fon_Latn', 'French': 'fra_Latn', 'Friulian': 'fur_Latn', 'Nigerian Fulfulde': 'fuv_Latn', 'Scottish Gaelic': 'gla_Latn', 'Irish': 'gle_Latn', 'Galician': 'glg_Latn', 'Guarani': 'grn_Latn', 'Gujarati': 'guj_Gujr', 'Haitian Creole': 'hat_Latn', 'Hausa': 'hau_Latn', 'Hebrew': 'heb_Hebr', 'Hindi': 'hin_Deva', 'Chhattisgarhi': 'hne_Deva', 'Croatian': 'hrv_Latn', 'Hungarian': 'hun_Latn', 'Armenian': 'hye_Armn', 'Igbo': 'ibo_Latn', 'Ilocano': 'ilo_Latn', 'Indonesian': 'ind_Latn', 'Icelandic': 'isl_Latn', 'Italian': 'ita_Latn', 'Javanese': 'jav_Latn', 'Japanese': 'jpn_Jpan', 'Kabyle': 'kab_Latn', 'Jingpho': 'kac_Latn', 'Kamba': 'kam_Latn', 'Kannada': 'kan_Knda', 'Kashmiri Arabic': 'kas_Arab', 'Kashmiri Devanagari': 'kas_Deva', 'Georgian': 'kat_Geor', 'Central Kanuri Arabic': 'knc_Arab', 'Central Kanuri Latin': 'knc_Latn', 'Kazakh': 'kaz_Cyrl', 'Kabiyè': 'kbp_Latn', 'Kabuverdianu': 'kea_Latn', 'Khmer': 'khm_Khmr', 'Kikuyu': 'kik_Latn', 'Kinyarwanda': 'kin_Latn', 'Kyrgyz': 'kir_Cyrl', 'Kimbundu': 'kmb_Latn', 'Northern Kurdish': 'kmr_Latn', 'Kikongo': 'kon_Latn', 'Korean': 'kor_Hang', 'Lao': 'lao_Laoo', 'Ligurian': 'lij_Latn', 'Limburgish': 'lim_Latn', 'Lingala': 'lin_Latn', 'Lithuanian': 'lit_Latn', 'Lombard': 'lmo_Latn', 'Latgalian': 'ltg_Latn', 'Luxembourgish': 'ltz_Latn', 'Luba-Kasai': 'lua_Latn', 'Ganda': 'lug_Latn', 'Luo': 'luo_Latn', 'Mizo': 'lus_Latn', 'Standard Latvian': 'lvs_Latn', 'Magahi': 'mag_Deva', 'Maithili': 'mai_Deva', 'Malayalam': 'mal_Mlym', 'Marathi': 'mar_Deva', 'Minangkabau Arabic ': 'min_Arab', 'Minangkabau Latin': 'min_Latn', 'Macedonian': 'mkd_Cyrl', 'Plateau Malagasy': 'plt_Latn', 'Maltese': 'mlt_Latn', 'Meitei Bengali': 'mni_Beng', 'Halh Mongolian': 'khk_Cyrl', 'Mossi': 'mos_Latn', 'Maori': 'mri_Latn', 'Burmese': 'mya_Mymr', 'Dutch': 'nld_Latn', 
'Norwegian Nynorsk': 'nno_Latn', 'Norwegian Bokmål': 'nob_Latn', 'Nepali': 'npi_Deva', 'Northern Sotho': 'nso_Latn', 'Nuer': 'nus_Latn', 'Nyanja': 'nya_Latn', 'Occitan': 'oci_Latn', 'West Central Oromo': 'gaz_Latn', 'Odia': 'ory_Orya', 'Pangasinan': 'pag_Latn', 'Eastern Panjabi': 'pan_Guru', 'Papiamento': 'pap_Latn', 'Western Persian': 'pes_Arab', 'Polish': 'pol_Latn', 'Portuguese': 'por_Latn', 'Dari': 'prs_Arab', 'Southern Pashto': 'pbt_Arab', 'Ayacucho Quechua': 'quy_Latn', 'Romanian': 'ron_Latn', 'Rundi': 'run_Latn', 'Russian': 'rus_Cyrl', 'Sango': 'sag_Latn', 'Sanskrit': 'san_Deva', 'Santali': 'sat_Olck', 'Sicilian': 'scn_Latn', 'Shan': 'shn_Mymr', 'Sinhala': 'sin_Sinh', 'Slovak': 'slk_Latn', 'Slovenian': 'slv_Latn', 'Samoan': 'smo_Latn', 'Shona': 'sna_Latn', 'Sindhi': 'snd_Arab', 'Somali': 'som_Latn', 'Southern Sotho': 'sot_Latn', 'Spanish': 'spa_Latn', 'Tosk Albanian': 'als_Latn', 'Sardinian': 'srd_Latn', 'Serbian': 'srp_Cyrl', 'Swati': 'ssw_Latn', 'Sundanese': 'sun_Latn', 'Swedish': 'swe_Latn', 'Swahili': 'swh_Latn', 'Silesian': 'szl_Latn', 'Tamil': 'tam_Taml', 'Tatar': 'tat_Cyrl', 'Telugu': 'tel_Telu', 'Tajik': 'tgk_Cyrl', 'Tagalog': 'tgl_Latn', 'Thai': 'tha_Thai', 'Tigrinya': 'tir_Ethi', 'Tamasheq Latin': 'taq_Latn', 'Tamasheq Tifinagh': 'taq_Tfng', 'Tok Pisin': 'tpi_Latn', 'Tswana': 'tsn_Latn', 'Tsonga': 'tso_Latn', 'Turkmen': 'tuk_Latn', 'Tumbuka': 'tum_Latn', 'Turkish': 'tur_Latn', 'Twi': 'twi_Latn', 'Central Atlas Tamazight': 'tzm_Tfng', 'Uyghur': 'uig_Arab', 'Ukrainian': 'ukr_Cyrl', 'Umbundu': 'umb_Latn', 'Urdu': 'urd_Arab', 'Northern Uzbek': 'uzn_Latn', 'Venetian': 'vec_Latn', 'Vietnamese': 'vie_Latn', 'Waray': 'war_Latn', 'Wolof': 'wol_Latn', 'Xhosa': 'xho_Latn', 'Eastern Yiddish': 'ydd_Hebr', 'Yoruba': 'yor_Latn', 'Yue Chinese': 'yue_Hant', 'Chinese Simplified': 'zho_Hans', 'Chinese Traditional': 'zho_Hant', 'Standard Malay': 'zsm_Latn', 'Zulu': 'zul_Latn', } class _SCREAMING_SNAKE_CASE ( __snake_case ): '''simple docstring''' lowerCamelCase__ = 
"facebook/nllb-200-distilled-600M" lowerCamelCase__ = ( "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should " "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, " "which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in " "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`." ) lowerCamelCase__ = "translator" lowerCamelCase__ = AutoTokenizer lowerCamelCase__ = AutoModelForSeqaSeqLM lowerCamelCase__ = LANGUAGE_CODES lowerCamelCase__ = ["text", "text", "text"] lowerCamelCase__ = ["text"] def _snake_case ( self : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ): if src_lang not in self.lang_to_code: raise ValueError(f"{src_lang} is not a supported language." ) if tgt_lang not in self.lang_to_code: raise ValueError(f"{tgt_lang} is not a supported language." ) SCREAMING_SNAKE_CASE = self.lang_to_code[src_lang] SCREAMING_SNAKE_CASE = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( __lowerCamelCase , return_tensors="pt" , src_lang=__lowerCamelCase , tgt_lang=__lowerCamelCase ) def _snake_case ( self : Any , __lowerCamelCase : Optional[Any] ): return self.model.generate(**__lowerCamelCase ) def _snake_case ( self : str , __lowerCamelCase : str ): return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__lowerCamelCase )
16
"""Variance-exploding (VE) SDE scheduler (Song et al., "Score-Based Generative
Modeling through Stochastic Differential Equations")."""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output of `ScoreSdeVeScheduler.step_pred`.

    Attributes:
        prev_sample: computed sample x_{t-1} for the previous timestep.
        prev_sample_mean: mean of `prev_sample` before the diffusion noise is added.
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Ancestral-sampling scheduler for the variance-exploding SDE.

    Implements the predictor (`step_pred`) and corrector (`step_correct`)
    updates of the score-based SDE sampler.
    """

    # scheduler order (number of model calls per denoising step)
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values (filled by set_timesteps / set_sigmas)
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Identity — kept so all schedulers expose the same interface."""
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        """Create the (continuous) timestep schedule, from 1 down to `sampling_eps`."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        """Set the geometric noise-scale schedule used by the VE SDE."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        """Sigma of the previous discrete step; zero at the first step."""
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        """Predictor step: propagate the sample with the reverse SDE.

        Raises:
            ValueError: if `set_timesteps` was never called.
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Corrector step: Langevin-style correction of the predicted sample.

        Raises:
            ValueError: if `set_timesteps` was never called.
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        """Forward-diffuse `original_samples` to the noise level of `timesteps`."""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noisy_samples = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noisy_samples + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
676
0
import copy
from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    """Configuration for a Mask2Former segmentation model.

    Stores the backbone configuration plus all transformer-decoder and loss
    hyper-parameters; defaults reproduce the Swin-small COCO-instance model.
    """

    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        # Allow a plain dict (e.g. from a serialized config) for the backbone.
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        # the decoder depth doubles as the generic `num_hidden_layers`
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2Former config from an existing backbone config."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
17
'''simple docstring''' def a_ ( __snake_case : int , __snake_case : int ) -> str: """simple docstring""" if not isinstance(__snake_case , __snake_case ): raise ValueError('''iterations must be defined as integers''' ) if not isinstance(__snake_case , __snake_case ) or not number >= 1: raise ValueError( '''starting number must be and integer and be more than 0''' ) if not iterations >= 1: raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' ) lowerCamelCase_ ='''''' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__snake_case ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
676
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _SCREAMING_SNAKE_CASE = { "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"], "configuration_maskformer_swin": ["MaskFormerSwinConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["MaskFormerFeatureExtractor"] _SCREAMING_SNAKE_CASE = ["MaskFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "MaskFormerForInstanceSegmentation", "MaskFormerModel", "MaskFormerPreTrainedModel", ] _SCREAMING_SNAKE_CASE = [ "MaskFormerSwinBackbone", "MaskFormerSwinModel", "MaskFormerSwinPreTrainedModel", ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure)
18
'''simple docstring''' from typing import List import numpy as np def a_ ( __snake_case : dict ) -> int: """simple docstring""" lowerCamelCase_ ={key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) lowerCamelCase_ =max(lists_lengths.values() , default=0 ) return max(1 , __snake_case ) def a_ ( __snake_case : int , __snake_case : int ) -> List[range]: """simple docstring""" lowerCamelCase_ =[] for group_idx in range(__snake_case ): lowerCamelCase_ =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowerCamelCase_ =shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowerCamelCase_ =range(__snake_case , start + num_shards_to_add ) shards_indices_per_group.append(__snake_case ) return shards_indices_per_group def a_ ( __snake_case : dict , __snake_case : int ) -> List[dict]: """simple docstring""" lowerCamelCase_ =_number_of_shards_in_gen_kwargs(__snake_case ) if num_shards == 1: return [dict(__snake_case )] else: lowerCamelCase_ =_distribute_shards(num_shards=__snake_case , max_num_jobs=__snake_case ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__snake_case , __snake_case ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__snake_case ) ) ] def a_ ( __snake_case : List[dict] ) -> dict: """simple docstring""" 
return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __snake_case ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def a_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> dict: """simple docstring""" lowerCamelCase_ ={len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )} lowerCamelCase_ ={} for size in list_sizes: lowerCamelCase_ =list(range(__snake_case ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowerCamelCase_ =dict(__snake_case ) for key, value in shuffled_kwargs.items(): if isinstance(__snake_case , __snake_case ): lowerCamelCase_ =[value[i] for i in indices_per_size[len(__snake_case )]] return shuffled_kwargs
676
0
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class _UpperCAmelCase( lowerCamelCase ): lowercase__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
19
"""One-off preprocessing for distillation: tokenize a raw text corpus and dump
the token-id arrays to a pickle so training never re-tokenizes."""
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    """Parse CLI args, encode every line of the corpus, and pickle the result."""
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter_ = 0
    interval = 10000
    start = time.time()
    for text in data:
        # special tokens are added manually here, so disable the tokenizer's own
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter_ += 1
        if iter_ % interval == 0:
            end = time.time()
            logger.info(f"{iter_} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # uint16 is enough for vocabularies below 2**16 and halves the dump size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
676
0
from collections import defaultdict from math import ceil, sqrt def _lowercase( __a : int = 100_0000 , __a : int = 10 ): a__ =defaultdict(__a ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: a__ =max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: a__ =1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(__a , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(F"""{solution() = }""")
20
"""MVP model configuration."""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    """Configuration for the MVP encoder-decoder model.

    Defaults reproduce the `RUCAIBox/mvp` checkpoint.
    """

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # legacy configs used `force_bos_token_to_be_generated` instead of
        # `forced_bos_token_id`; honor it but warn so the config gets re-saved
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
676
0
import unittest

import numpy as np

from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
        FlaxRobertaPreLayerNormForCausalLM,
        FlaxRobertaPreLayerNormForMaskedLM,
        FlaxRobertaPreLayerNormForMultipleChoice,
        FlaxRobertaPreLayerNormForQuestionAnswering,
        FlaxRobertaPreLayerNormForSequenceClassification,
        FlaxRobertaPreLayerNormForTokenClassification,
        FlaxRobertaPreLayerNormModel,
    )


class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    """Builds tiny configs/inputs shared by the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random ids/masks plus a small RobertaPreLayerNormConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Same as above but in decoder mode, with encoder states for cross-attention."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        EXPECTED_SLICE = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
21
"""SentencePiece tokenizer for BertGeneration (bert_for_seq_generation)."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    """Tokenizer backed by a SentencePiece model file (`spiece.model`)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Full token->id mapping, including tokens added after loading."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # the SentencePiece processor is not picklable; drop it and reload on unpickle
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Split `text` into SentencePiece subword strings."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Decode subword tokens back to a string, passing special tokens through verbatim."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
676
0
"""Lazy import structure for the Perceiver model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Always-importable entries; optional-dependency entries are appended below.
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    # replace this module with a lazy proxy so submodules load on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
22
'''simple docstring''' from collections.abc import Sequence def a_ ( __snake_case : Sequence[float] , __snake_case : float ) -> float: """simple docstring""" return sum(c * (x**i) for i, c in enumerate(__snake_case ) ) def a_ ( __snake_case : Sequence[float] , __snake_case : float ) -> float: """simple docstring""" lowerCamelCase_ =0.0 for coeff in reversed(__snake_case ): lowerCamelCase_ =result * x + coeff return result if __name__ == "__main__": a_ : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0) a_ : Tuple = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
676
0
"""Flax safety checker used by Stable Diffusion to flag NSFW images.

Fixes vs. the original: both classes were named ``_a`` (the second shadowed
the first), the ``module_class`` attribute referenced the then-undefined name
``FlaxStableDiffusionSafetyCheckerModule``, the base class ``UpperCAmelCase__``
was undefined, and ``jax_cosine_distance`` normalized its first argument
twice instead of normalizing each argument once.
"""
from typing import Optional, Tuple

import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule


def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """Row-wise cosine similarity matrix between two (N, D) embedding arrays."""
    # Normalize each embedding set to unit rows; clip the norm to avoid /0.
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        # Learned concept anchors: 17 NSFW concepts, 3 "special care" concepts.
        self.concept_embeds = self.param('concept_embeds', jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            'special_care_embeds', jax.nn.initializers.ones, (3, self.config.projection_dim)
        )
        self.concept_embeds_weights = self.param('concept_embeds_weights', jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param('special_care_embeds_weights', jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)

        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            # NHWC image batch expected by the Flax CLIP vision tower.
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None):
        # Incoming images are NCHW; the module expects NHWC.
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
23
"""Processor combining a CLIP image processor with an XLM-Roberta tokenizer.

Fixes vs. the original: the three class attributes were all named
``lowercase`` (only the last survived, breaking ``ProcessorMixin``), the base
class ``lowerCamelCase__`` was undefined, ``pixel_values`` were computed but
never attached to the returned encoding, and the three public methods were
all named ``lowercase__`` (shadowing each other).
"""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class __UpperCamelCase(ProcessorMixin):
    # ProcessorMixin wires these names to __init__ arguments and save/load.
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Backwards-compatible alias for the pre-v5 argument name.
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize *text* and/or preprocess *images*; returns a BatchEncoding.

        When both are given, the image `pixel_values` are merged into the
        text encoding so a single mapping feeds the model.
        """
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
676
0
"""Extractive question-answering task template for `datasets`.

Fixes vs. the original: every field was named ``__lowercase`` (only the last
assignment survived, destroying the dataclass), the decorator received the
undefined name ``__lowerCAmelCase`` as ``frozen``, and the base class was the
same undefined name instead of ``TaskTemplate``.
NOTE(review): field names reconstructed from the `datasets` TaskTemplate
convention — confirm against upstream.
"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class lowerCAmelCase(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='question-answering-extractive', metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'question': Value('string'), 'context': Value('string')})
    label_schema: ClassVar[Features] = Features(
        {
            'answers': Sequence(
                {
                    'text': Value('string'),
                    'answer_start': Value('int32'),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names onto the canonical task column names."""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
24
"""Image-to-text generation pipeline (image captioning / conditional captioning).

Fixes vs. the original: the decorator and base class were the undefined name
``lowerCamelCase__`` instead of ``PIPELINE_INIT_ARGS``/``Pipeline``, and all
four pipeline hooks were named ``lowercase__`` (shadowing each other), so the
``Pipeline`` machinery could never find ``_sanitize_parameters`` /
``preprocess`` / ``_forward`` / ``postprocess``.
"""
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class __UpperCamelCase(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        """Split user kwargs into preprocess / forward / postprocess params."""
        preprocess_params = {}
        forward_kwargs = {}

        if prompt is not None:
            preprocess_params['prompt'] = prompt
        if generate_kwargs is not None:
            forward_kwargs['generate_kwargs'] = generate_kwargs
        if max_new_tokens is not None:
            if 'generate_kwargs' not in forward_kwargs:
                forward_kwargs['generate_kwargs'] = {}
            if 'max_new_tokens' in forward_kwargs['generate_kwargs']:
                raise ValueError(
                    '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
                    ' please use only one'
                )
            forward_kwargs['generate_kwargs']['max_new_tokens'] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f'Received an invalid text input, got - {type(prompt)} - but expected a single string. '
                    'Note also that one single text can be provided for conditional image to text generation.'
                )

            # Prompt handling is model-family specific.
            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                # GIT prepends the CLS token to the prompt token ids by hand.
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({'input_ids': input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f'Model type {model_type} does not support conditional text generation')
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs['input_ids'] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model,
        # the pipeline will group them into a list of `None`, which fail `generate`. Normalize back to `None`.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['input_ids'], list)
            and all(x is None for x in model_inputs['input_ids'])
        ):
            model_inputs['input_ids'] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                'generated_text': self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
676
0
"""Configuration for GPTSAN-japanese.

Fixes vs. the original: every ``__init__`` parameter was named ``a``
(duplicate parameter names — a SyntaxError) and the module-level logger was
clobbered by the archive map because both were assigned to ``a_``.
NOTE(review): parameter names reconstructed from the body's assignment
order — confirm against the upstream GPTSanJapaneseConfig.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
        'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
    ),
}


class _UpperCamelCase(PretrainedConfig):
    model_type = 'gptsan-japanese'
    keys_to_ignore_at_inference = [
        'past_key_values',
    ]
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        # Total depth = sparse (switch) layers + dense extra layers.
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
25
"""CLI: convert a TensorFlow BERT checkpoint to a PyTorch `BertForPreTraining`.

Fixes vs. the original: the conversion function was defined as ``a_`` but
invoked as ``convert_tf_checkpoint_to_pytorch`` (NameError), and the parsed
``ArgumentParser``/``Namespace`` were assigned to ``a_`` while the code used
the undefined names ``parser``/``args``.
"""
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Build a PyTorch BERT from *bert_config_file*, load TF weights, save it."""
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
676
0
"""Configuration for the TrOCR decoder.

Fixes vs. the original: every ``__init__`` parameter was named
``__magic_name__`` (duplicate parameter names — a SyntaxError), the base
class ``__lowercase`` was undefined, and the module-level logger was
clobbered by the archive map because both were assigned to ``__UpperCamelCase``.
NOTE(review): parameter names reconstructed from the body's assignment
order — confirm against the upstream TrOCRConfig.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class _A(PretrainedConfig):
    model_type = '''trocr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''decoder_attention_heads''',
        '''hidden_size''': '''d_model''',
        '''num_hidden_layers''': '''decoder_layers''',
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
26
"""AltCLIP model configurations (text, vision and combined).

Fixes vs. the original: all three classes were named ``__UpperCamelCase``
(each shadowed the previous one) while the combined config's body referenced
the then-undefined names ``AltCLIPTextConfig``/``AltCLIPVisionConfig``, every
``__init__`` parameter was named ``lowerCAmelCase`` (duplicate parameter
names — a SyntaxError), and the base class ``lowerCamelCase__`` was undefined.
NOTE(review): parameter names reconstructed from the bodies' assignment
order — confirm against the upstream AltCLIP configuration module.
"""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    model_type = 'altclip_text_model'

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    model_type = 'altclip_vision_model'

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('''model_type''') == "altclip":
            config_dict = config_dict['''vision_config''']

        if "model_type" in config_dict and hasattr(cls, '''model_type''') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.'''
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    model_type = 'altclip'
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        text_config_dict = kwargs.pop('''text_config_dict''', None)
        vision_config_dict = kwargs.pop('''vision_config_dict''', None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict['''id2label'''].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.'''
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''')

        if vision_config is None:
            vision_config = {}
            logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''')

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a combined config from separate text and vision configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
0
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class lowerCamelCase( __snake_case ): '''simple docstring''' def __init__( self , snake_case_ , snake_case_=13 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=True , snake_case_=99 , snake_case_=32 , snake_case_=5 , snake_case_=4 , snake_case_=64 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=512 , snake_case_=16 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , snake_case_=2 , snake_case_=2 , snake_case_=2 , snake_case_=2 , snake_case_=4 , snake_case_=1 , ): _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = scope _A = q_groups _A = k_groups _A = v_groups _A = post_attention_groups _A = intermediate_groups _A = output_groups def lowerCAmelCase__ ( self ): _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, 
self.seq_length] ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase__ ( self ): return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = SqueezeBertModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() _A = model(snake_case_ , snake_case_ ) _A = model(snake_case_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = SqueezeBertForMaskedLM(config=snake_case_ ) model.to(snake_case_ ) model.eval() _A = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = 
SqueezeBertForQuestionAnswering(config=snake_case_ ) model.to(snake_case_ ) model.eval() _A = model( snake_case_ , attention_mask=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = self.num_labels _A = SqueezeBertForSequenceClassification(snake_case_ ) model.to(snake_case_ ) model.eval() _A = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = self.num_labels _A = SqueezeBertForTokenClassification(config=snake_case_ ) model.to(snake_case_ ) model.eval() _A = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): _A = self.num_choices _A = SqueezeBertForMultipleChoice(config=snake_case_ ) model.to(snake_case_ ) model.eval() _A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = model( snake_case_ , attention_mask=snake_case_ , labels=snake_case_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase__ ( self ): _A = self.prepare_config_and_inputs() ((_A), (_A), (_A), (_A), (_A), (_A)) = config_and_inputs _A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCamelCase( __snake_case , __snake_case , 
unittest.TestCase ): '''simple docstring''' __magic_name__ = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) __magic_name__ = ( { 'feature-extraction': SqueezeBertModel, 'fill-mask': SqueezeBertForMaskedLM, 'question-answering': SqueezeBertForQuestionAnswering, 'text-classification': SqueezeBertForSequenceClassification, 'token-classification': SqueezeBertForTokenClassification, 'zero-shot': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = True __magic_name__ = False def lowerCAmelCase__ ( self ): _A = SqueezeBertModelTester(self ) _A = ConfigTester(self , config_class=snake_case_ , dim=37 ) def lowerCAmelCase__ ( self ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*snake_case_ ) def lowerCAmelCase__ ( self ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*snake_case_ ) @slow def lowerCAmelCase__ ( self ): for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = 
SqueezeBertModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) @require_sentencepiece @require_tokenizers @require_torch class lowerCamelCase( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase__ ( self ): _A = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' ) _A = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] ) _A = model(snake_case_ )[0] _A = torch.Size((1, 3) ) self.assertEqual(output.shape , snake_case_ ) _A = torch.tensor([[0.6401, -0.0349, -0.6041]] ) self.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-4 ) )
27
'''simple docstring''' import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=2, lowerCAmelCase=99, lowerCAmelCase=0, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase="last", lowerCAmelCase=None, lowerCAmelCase=None, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =seq_length lowerCamelCase_ =is_training lowerCamelCase_ =use_input_lengths lowerCamelCase_ =use_token_type_ids lowerCamelCase_ =use_labels lowerCamelCase_ =gelu_activation lowerCamelCase_ =sinusoidal_embeddings lowerCamelCase_ =causal lowerCamelCase_ =asm lowerCamelCase_ =n_langs lowerCamelCase_ =vocab_size lowerCamelCase_ =n_special lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ 
=attention_probs_dropout_prob lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =type_vocab_size lowerCamelCase_ =type_sequence_label_size lowerCamelCase_ =initializer_range lowerCamelCase_ =num_labels lowerCamelCase_ =num_choices lowerCamelCase_ =summary_type lowerCamelCase_ =use_proj lowerCamelCase_ =scope def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ =None if self.use_input_lengths: lowerCamelCase_ =( ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCamelCase_ =None if self.use_token_type_ids: lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.n_langs ) lowerCamelCase_ =None lowerCamelCase_ =None lowerCamelCase_ =None if self.use_labels: lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size ) lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels ) lowerCamelCase_ =ids_tensor([self.batch_size], 2 ).float() lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices ) lowerCamelCase_ =self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowercase__ ( self ): """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, ) def lowercase__ ( self, 
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, lengths=lowerCAmelCase, langs=lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, langs=lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertWithLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForQuestionAnswering(lowerCAmelCase ) model.to(lowerCAmelCase ) 
model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model( lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, p_mask=lowerCAmelCase, ) lowerCamelCase_ =model( lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, ) ((lowerCamelCase_), ) =result_with_labels.to_tuple() lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase ) ((lowerCamelCase_), ) =result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, () ) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =self.num_labels lowerCamelCase_ 
=FlaubertForTokenClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =self.num_choices lowerCamelCase_ =FlaubertForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =model( lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ) =config_and_inputs lowerCamelCase_ ={ '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : List[Any] =( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) lowercase : Tuple =( { 'feature-extraction': 
FlaubertModel, 'fill-mask': FlaubertWithLMHeadModel, 'question-answering': FlaubertForQuestionAnsweringSimple, 'text-classification': FlaubertForSequenceClassification, 'token-classification': FlaubertForTokenClassification, 'zero-shot': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ): """simple docstring""" lowerCamelCase_ =super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowerCamelCase_ =torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase ) lowerCamelCase_ =torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase ) return inputs_dict def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =FlaubertModelTester(self ) lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, emb_dim=37 ) def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase ) def lowercase__ ( self 
): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =FlaubertModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @slow @require_torch_gpu def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return lowerCamelCase_ =True lowerCamelCase_ =model_class(config=lowerCAmelCase ) lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =torch.jit.trace( lowerCAmelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, '''traced_model.pt''' ) ) lowerCamelCase_ =torch.jit.load(os.path.join(lowerCAmelCase, '''traced_model.pt''' ), map_location=lowerCAmelCase ) loaded(inputs_dict['''input_ids'''].to(lowerCAmelCase ), inputs_dict['''attention_mask'''].to(lowerCAmelCase ) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' ) lowerCamelCase_ =torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase )[0] lowerCamelCase_ =torch.Size((1, 11, 768) ) self.assertEqual(output.shape, lowerCAmelCase ) lowerCamelCase_ =torch.tensor( [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=1e-4 ) )
676
0
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' A : int = StableDiffusionDiffEditPipeline A : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''} A : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''} A : str = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess A : Union[str, Any] = frozenset([] ) def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=A, ) SCREAMING_SNAKE_CASE : int = DDIMScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, set_alpha_to_one=A, ) SCREAMING_SNAKE_CASE : str = DDIMInverseScheduler( beta_start=0.0_00_85, beta_end=0.0_12, beta_schedule='scaled_linear', clip_sample=A, 
set_alpha_to_zero=A, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='gelu', projection_dim=512, ) SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(A ) SCREAMING_SNAKE_CASE : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE : int = { 'unet': unet, 'scheduler': scheduler, 'inverse_scheduler': inverse_scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 16, 16), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(A ) ).to(A ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = { 'prompt': 'a dog and a newt', 'mask_image': mask, 'image_latents': latents, 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : Any = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A 
).startswith('mps' ): SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : int = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Dict = { 'image': image, 'source_prompt': 'a cat and a frog', 'target_prompt': 'a dog and a newt', 'generator': generator, 'num_inference_steps': 2, 'num_maps_per_mask': 2, 'mask_encode_strength': 1.0, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32), rng=random.Random(A ) ).to(A ) SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0, 2, 3, 1 )[0] SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(A ) ).convert('RGB' ) if str(A ).startswith('mps' ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(A ) else: SCREAMING_SNAKE_CASE : Any = torch.Generator(device=A ).manual_seed(A ) SCREAMING_SNAKE_CASE : Any = { 'image': image, 'prompt': 'a cat and a frog', 'generator': generator, 'num_inference_steps': 2, 'inpaint_strength': 1.0, 'guidance_scale': 6.0, 'decode_latents': True, 'output_type': 'numpy', } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' if not hasattr(self.pipeline_class, '_optional_components' ): return SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components() SCREAMING_SNAKE_CASE : Optional[int] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A, A, A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Dict = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) SCREAMING_SNAKE_CASE : List[Any] = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) 
pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A, A ) is None, F"`{optional_component}` did not stay set to None after loading.", ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(A ) SCREAMING_SNAKE_CASE : Tuple = pipe_loaded(**A )[0] SCREAMING_SNAKE_CASE : List[str] = np.abs(output - output_loaded ).max() self.assertLess(A, 1E-4 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = 'cpu' SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = self.get_dummy_mask_inputs(A ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.generate_mask(**A ) SCREAMING_SNAKE_CASE : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape, (1, 16, 16) ) SCREAMING_SNAKE_CASE : Any = np.array([0] * 9 ) SCREAMING_SNAKE_CASE : Any = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) self.assertEqual(mask[0, -3, -4], 0 ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) def UpperCamelCase_ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=5E-3 ) def 
UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = 'cpu' SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE : Dict = {'beta_start': 0.0_00_85, 'beta_end': 0.0_12, 'beta_schedule': 'scaled_linear'} SCREAMING_SNAKE_CASE : Union[str, Any] = DPMSolverMultistepScheduler(**A ) SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepInverseScheduler(**A ) SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inversion_inputs(A ) SCREAMING_SNAKE_CASE : List[str] = pipe.invert(**A ).images SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3) ) SCREAMING_SNAKE_CASE : Tuple = np.array( [0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99], ) SCREAMING_SNAKE_CASE : Any = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A, 1E-3 ) @require_torch_gpu @slow class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def UpperCamelCase_ ( cls ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' ) SCREAMING_SNAKE_CASE : Optional[int] = raw_image.convert('RGB' ).resize((768, 768) ) SCREAMING_SNAKE_CASE : List[str] = raw_image def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : int = DDIMInverseScheduler.from_config(pipe.scheduler.config ) 
pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : List[Any] = 'a bowl of fruit' SCREAMING_SNAKE_CASE : List[str] = 'a bowl of pears' SCREAMING_SNAKE_CASE : Dict = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Optional[int] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A ).latents SCREAMING_SNAKE_CASE : List[str] = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : List[Any] = ( np.array( load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1 def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = StableDiffusionDiffEditPipeline.from_pretrained( 'stabilityai/stable-diffusion-2-1', safety_checker=A, torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) SCREAMING_SNAKE_CASE : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) SCREAMING_SNAKE_CASE : str = 'a bowl of fruit' SCREAMING_SNAKE_CASE : Tuple = 'a bowl of pears' SCREAMING_SNAKE_CASE : List[Any] = pipe.generate_mask( image=self.raw_image, source_prompt=A, target_prompt=A, generator=A, ) SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.invert( prompt=A, image=self.raw_image, inpaint_strength=0.7, generator=A, num_inference_steps=25, ).latents SCREAMING_SNAKE_CASE : str = pipe( prompt=A, mask_image=A, image_latents=A, generator=A, negative_prompt=A, inpaint_strength=0.7, num_inference_steps=25, output_type='numpy', ).images[0] SCREAMING_SNAKE_CASE : Tuple = ( np.array( 
load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/diffedit/pears.png' ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5E-1
28
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging a_ : List[Any] = logging.get_logger(__name__) def a_ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : int=False ) -> List[str]: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: lowerCamelCase_ =os.path.abspath(__snake_case ) logger.info(F'''Loading PyTorch weights from {pt_path}''' ) lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' ) logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' ) lowerCamelCase_ =convert_pytorch_state_dict_to_flax(__snake_case , __snake_case ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files lowerCamelCase_ =convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case ) return flax_state_dict def a_ ( __snake_case : Tuple[str] , __snake_case : np.ndarray , __snake_case : Dict[str, jnp.ndarray] , __snake_case : str , ) -> (Tuple[str], np.ndarray): """simple docstring""" def is_key_or_prefix_key_in_dict(__snake_case : Tuple[str] ) -> bool: return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0 # layer norm lowerCamelCase_ =pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean lowerCamelCase_ =pt_tuple_key[:-1] + ('''mean''',) if 
pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var lowerCamelCase_ =pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # embedding lowerCamelCase_ =pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ): lowerCamelCase_ =pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ): lowerCamelCase_ =pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase_ =pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase_ =pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 lowerCamelCase_ =None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): lowerCamelCase_ =pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): lowerCamelCase_ =pt_tuple_key[-2] + '''_v''' if name is not None: lowerCamelCase_ =pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def a_ ( __snake_case : Union[str, Any] , __snake_case : str ) -> str: """simple docstring""" # convert pytorch tensor to numpy lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase_ 
=flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: lowerCamelCase_ =flax_model.params['''params'''] else: lowerCamelCase_ =flax_model.params lowerCamelCase_ =flatten_dict(__snake_case ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase_ =flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(__snake_case ) lowerCamelCase_ ={} lowerCamelCase_ =(model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowerCamelCase_ =(model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase_ =tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowerCamelCase_ =pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ =pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor( __snake_case , __snake_case , __snake_case , __snake_case ) # add model prefix if necessary lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__snake_case , __snake_case ) continue # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) else: # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) return unflatten_dict(__snake_case ) def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Optional[Any]: """simple docstring""" import torch # Load the index lowerCamelCase_ ={} for shard_file in shard_filenames: # load using msgpack utils lowerCamelCase_ =torch.load(__snake_case ) lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase_ =flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase_ =flax_model.params['''params'''] lowerCamelCase_ =flatten_dict(__snake_case ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: lowerCamelCase_ =flax_model.params lowerCamelCase_ =flatten_dict(__snake_case ) lowerCamelCase_ =(model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowerCamelCase_ =(model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase_ =tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowerCamelCase_ =pt_tuple_key[0] == model_prefix if 
load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ =pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor( __snake_case , __snake_case , __snake_case , __snake_case ) # add model prefix if necessary lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue if "var" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__snake_case , __snake_case ) continue # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) else: # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) return unflatten_dict(__snake_case ) def a_ ( __snake_case : List[str] , __snake_case : Dict ) -> str: """simple docstring""" lowerCamelCase_ =os.path.abspath(__snake_case ) logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' ) # import correct flax class lowerCamelCase_ =getattr(__snake_case , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(__snake_case , '''rb''' ) as state_f: try: lowerCamelCase_ =from_bytes(__snake_case , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. 
''' ) return load_flax_weights_in_pytorch_model(__snake_case , __snake_case ) def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[int]: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights lowerCamelCase_ =flatten_dict(jax.tree_util.tree_map(lambda __snake_case : x.dtype == jnp.bfloataa , __snake_case ) ).values() if any(__snake_case ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) lowerCamelCase_ =jax.tree_util.tree_map( lambda __snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __snake_case ) lowerCamelCase_ =flatten_dict(__snake_case ) lowerCamelCase_ =pt_model.state_dict() lowerCamelCase_ =(pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) lowerCamelCase_ =(pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys lowerCamelCase_ =[] lowerCamelCase_ =set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCamelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix lowerCamelCase_ ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ 
=flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict: # conv layer lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) lowerCamelCase_ =jnp.transpose(__snake_case , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict: # linear layer lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) lowerCamelCase_ =flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: lowerCamelCase_ ='''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: lowerCamelCase_ ='''.'''.join(__snake_case ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. 
lowerCamelCase_ ={} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: lowerCamelCase_ =key.split('''.''' ) lowerCamelCase_ =None if key_components[-3::2] == ["parametrizations", "original0"]: lowerCamelCase_ =key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: lowerCamelCase_ =key_components[-2] + '''_v''' if name is not None: lowerCamelCase_ =key_components[:-3] + [name] lowerCamelCase_ ='''.'''.join(__snake_case ) lowerCamelCase_ =key if flax_key in special_pt_names: lowerCamelCase_ =special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ''' F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict lowerCamelCase_ =np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor lowerCamelCase_ =torch.from_numpy(__snake_case ) # remove from missing keys missing_keys.remove(__snake_case ) else: # weight is not expected by PyTorch model unexpected_keys.append(__snake_case ) pt_model.load_state_dict(__snake_case ) # re-transform missing_keys to list lowerCamelCase_ =list(__snake_case ) if len(__snake_case ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' ''' to be exactly identical (e.g. 
initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' ) if len(__snake_case ) > 0: logger.warning( F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' ''' use it for predictions and inference.''' ) else: logger.warning( F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n''' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' ) return pt_model
676
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class __lowerCamelCase : def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=512 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.0_2 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , UpperCAmelCase=0 , ): lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope lowerCamelCase_ = projection_dim def UpperCAmelCase__ 
( self ): lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , ) lowerCamelCase_ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): lowerCamelCase_ = TFDPRContextEncoder(config=UpperCAmelCase ) lowerCamelCase_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase ) lowerCamelCase_ = model(UpperCAmelCase , token_type_ids=UpperCAmelCase ) lowerCamelCase_ = model(UpperCAmelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def UpperCAmelCase__ ( 
self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): lowerCamelCase_ = TFDPRQuestionEncoder(config=UpperCAmelCase ) lowerCamelCase_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase ) lowerCamelCase_ = model(UpperCAmelCase , token_type_ids=UpperCAmelCase ) lowerCamelCase_ = model(UpperCAmelCase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): lowerCamelCase_ = TFDPRReader(config=UpperCAmelCase ) lowerCamelCase_ = model(UpperCAmelCase , attention_mask=UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) ) def UpperCAmelCase__ ( self ): lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = {'''input_ids''': input_ids} return config, inputs_dict @require_tf class __lowerCamelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): a__: Any = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) a__: Optional[int] = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {} a__: str = False a__: Any = False a__: int = False a__: Dict = False a__: Any = False def UpperCAmelCase__ ( self ): lowerCamelCase_ = TFDPRModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 ) def UpperCAmelCase__ ( self ): self.config_tester.run_common_tests() def 
UpperCAmelCase__ ( self ): lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*UpperCAmelCase ) def UpperCAmelCase__ ( self ): lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*UpperCAmelCase ) def UpperCAmelCase__ ( self ): lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*UpperCAmelCase ) @slow def UpperCAmelCase__ ( self ): for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFDPRContextEncoder.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFDPRContextEncoder.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFDPRQuestionEncoder.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFDPRReader.from_pretrained(UpperCAmelCase ) self.assertIsNotNone(UpperCAmelCase ) @require_tf class __lowerCamelCase ( unittest.TestCase ): @slow def UpperCAmelCase__ ( self ): lowerCamelCase_ = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' ) lowerCamelCase_ = tf.constant( [[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP] lowerCamelCase_ = model(UpperCAmelCase )[0] # embedding shape = (1, 768) # compare the actual values for a slice. 
lowerCamelCase_ = tf.constant( [ [ 0.0_3_2_3_6_2_5_3, 0.1_2_7_5_3_3_3_5, 0.1_6_8_1_8_5_0_9, 0.0_0_2_7_9_7_8_6, 0.3_8_9_6_9_3_3, 0.2_4_2_6_4_9_4_5, 0.2_1_7_8_9_7_1, -0.0_2_3_3_5_2_2_7, -0.0_8_4_8_1_9_5_9, -0.1_4_3_2_4_1_1_7, ] ] ) self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
29
"""Interleave the characters of two strings."""
from itertools import zip_longest


def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Return a string that alternates the characters of ``first_str`` and
    ``second_str``; once the shorter string is exhausted, the remaining tail
    of the longer one is appended unchanged.

    >>> alternative_string_arrange("AB", "XYZ")
    'AXBYZ'
    >>> alternative_string_arrange("ABCD", "XY")
    'AXBYCD'
    """
    # zip_longest pads the shorter string with '' so the longer tail passes
    # straight through, matching the original index-by-index loop.
    return "".join(a + b for a, b in zip_longest(first_str, second_str, fillvalue=""))


# The dump had renamed the function to ``a_`` (with both parameters collapsed
# to one name — a SyntaxError) while the __main__ block still called
# ``alternative_string_arrange``; keep both names bound for compatibility.
a_ = alternative_string_arrange

if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
676
0
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lazy-import ``__init__`` for the X-MOD model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> public symbols, consumed by _LazyModule below.
# NOTE(review): the dump bound both this dict and the modeling list to a
# single throwaway name (``__a``), leaving ``_import_structure`` — which the
# _LazyModule call references — undefined. The standard structure is restored.
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

# Torch-backed modeling symbols are only advertised when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module in sys.modules with a lazy proxy so the
    # heavy submodules load on first attribute access. The dump assigned the
    # proxy to a plain name, which made ``import sys`` dead code and defeated
    # the lazy loading entirely; the standard installation is restored.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
30
"""Lazy-import ``__init__`` for the TimmBackbone wrapper."""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> public symbols, consumed by _LazyModule below.
# NOTE(review): the dump bound the dict, the modeling list and the proxy to a
# single throwaway name (``a_``), leaving ``_import_structure`` — referenced
# by the _LazyModule call — undefined. The standard structure is restored.
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

# The modeling symbol is only advertised when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone
else:
    import sys

    # Install the lazy proxy in sys.modules so submodules load on first
    # attribute access (the dump assigned it to a plain name instead).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

# NOTE(review): the dump bound both the logger above and this map to the same
# name (``lowerCamelCase__``), so the second assignment clobbered the first;
# distinct names restored.
CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class lowerCamelCase_(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a ConvNeXt V2 model / backbone.

    The dump declared the bases as the undefined name ``_SCREAMING_SNAKE_CASE``;
    the two mixins imported at the top of this module are restored. The
    parameter names are recovered from the identifiers the body itself reads.

    Args:
        num_channels: number of input image channels.
        patch_size: patch size used by the stem convolution.
        num_stages: number of stages in the encoder.
        hidden_sizes: per-stage channel counts; defaults to [96, 192, 384, 768].
        depths: per-stage block counts; defaults to [3, 3, 9, 3].
        hidden_act: activation function name.
        initializer_range: stddev for weight initialization.
        layer_norm_eps: epsilon used by layer-norm layers.
        drop_path_rate: stochastic-depth rate.
        image_size: expected input resolution.
        out_features / out_indices: which stages a backbone exposes; aligned
            by ``get_aligned_output_features_output_indices``.
    """

    # consumed by PretrainedConfig's model-type machinery; the dump had
    # renamed this class attribute to the meaningless ``lowercase_``
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels: int = 3,
        patch_size: int = 4,
        num_stages: int = 4,
        hidden_sizes=None,
        depths=None,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        drop_path_rate: float = 0.0,
        image_size: int = 224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The dump assigned every one of these to a local variable
        # (``SCREAMING_SNAKE_CASE_``), so the config never stored anything —
        # even though the call below reads ``self.stage_names``. Restored to
        # instance attributes.
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        # BackboneConfigMixin exposes out_features/out_indices as properties
        # backed by these private attributes — TODO confirm against the mixin.
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
31
"""Compute the Levenshtein (edit) distance between two strings."""
import functools  # retained: the previous memoized implementation used it


def a_(worda: str, wordb: str) -> int:
    """Return the minimum number of single-character insertions, deletions
    and substitutions needed to turn ``worda`` into ``wordb``.

    The previous version recursed once per character pair (hitting Python's
    recursion limit for long inputs), duplicated its parameter names (a
    SyntaxError), and computed the substitution cost by comparing a character
    with itself (always 0). This version uses the classic two-row dynamic
    programme: O(len(worda) * len(wordb)) time, O(min(len, len)) memory.

    >>> a_("kitten", "sitting")
    3
    >>> a_("", "abc")
    3
    >>> a_("abc", "abc")
    0
    """
    # Keep the second word as the shorter one so the DP rows stay minimal.
    if len(wordb) > len(worda):
        worda, wordb = wordb, worda
    # previous_row[j] = distance between the first i-1 chars of worda and
    # the first j chars of wordb.
    previous_row = list(range(len(wordb) + 1))
    for i, char_a in enumerate(worda, start=1):
        current_row = [i]
        for j, char_b in enumerate(wordb, start=1):
            deletion = previous_row[j] + 1
            insertion = current_row[j - 1] + 1
            substitution = previous_row[j - 1] + (char_a != char_b)
            current_row.append(min(deletion, insertion, substitution))
        previous_row = current_row
    return previous_row[-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0
"""Benchmark average ONNX Runtime inference latency for a BERT-style model.onnx."""
import os
import time

import numpy as np
import onnxruntime as ort


# NOTE(review): these three assignments originally targeted something else
# (most likely `os.environ[...]` keys) -- the targets were lost in a refactor,
# so as written they only rebind a throwaway module name. TODO: recover the
# intended keys before relying on this script's TensorRT configuration.
UpperCAmelCase_ = "1"
UpperCAmelCase_ = "0"
UpperCAmelCase_ = "1"

sess_opt = ort.SessionOptions()
# Disable graph optimizations so the raw, unoptimized graph latency is measured.
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL

print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
# Fix: `np.intaa` does not exist in NumPy. BERT-style ONNX graphs take int64
# token ids / masks, so np.int64 is used.
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
# One untimed run so lazy initialization (engine build, allocation) is excluded.
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
outputs = {}
for iter in range(max_iters):
    outputs = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
32
"""Check whether an integer is an automorphic number (its square ends in it)."""


def a_(number: int) -> bool:
    """Return True if ``number`` is automorphic, i.e. ``number * number`` ends
    with the decimal digits of ``number`` (0, 1, 5, 6, 25, 76, ...).

    Fix: the original guard called ``isinstance(number, number)``, which raises
    ``TypeError: isinstance() arg 2 must be a type`` for every input instead of
    validating it; the intended type check against ``int`` is restored.

    Raises:
        TypeError: if ``number`` is not an integer.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number and of its square, one by one.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0
"""Repo utility: verify every configuration attribute is actually used by the
corresponding modeling files.

Fix: every local binding in this script had been collapsed to a placeholder
name (``snake_case__``) while later statements read the real names
(``attribute_used``, ``case_allowed``, ...), raising NameError on first call.
The coherent names, recovered from those reads, are restored throughout.
"""
import inspect
import os
import re

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Per-config allow-list: True allows every attribute, a list allows only those.
SPECIAL_CASES_TO_ALLOW = {
    # used to compute the property `self.chunk_length`
    "EncodecConfig": ["overlap"],
    # used as `self.bert_model = BertModel(config, ...)`
    "DPRConfig": True,
    # not used in modeling files, but it's an important information
    "FSMTConfig": ["langs"],
    # used internally in the configuration class file
    "GPTNeoConfig": ["attention_types"],
    # used internally in the configuration class file
    "EsmConfig": ["is_folding_model"],
    # used during training (despite we don't have training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite we don't have training script for these models yet)
    # `norm` used in conversion script (despite not using in the modeling file)
    "OneFormerConfig": ["ignore_value", "norm"],
    # used during preprocessing and collation, see `collating_graphormer.py`
    "GraphormerConfig": ["spatial_pos_max"],
    # used internally in the configuration class file
    "T5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    # `tokenizer_class` get default value `T5Tokenizer` intentionally
    "MT5Config": ["feed_forward_proj", "tokenizer_class"],
    "UMT5Config": ["feed_forward_proj", "tokenizer_class"],
    # used internally in the configuration class file
    "LongT5Config": ["feed_forward_proj"],
    # used internally in the configuration class file
    "SwitchTransformersConfig": ["feed_forward_proj"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "BioGptConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "GLPNConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "SegformerConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "CvtConfig": ["layer_norm_eps"],
    # having default values other than `1e-5` - we can't fix them without breaking
    "PerceiverConfig": ["layer_norm_eps"],
    # used internally to calculate the feature size
    "InformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate the feature size
    "AutoformerConfig": ["num_static_real_features", "num_time_features"],
    # used internally to calculate `mlp_dim`
    "SamVisionConfig": ["mlp_ratio"],
    # For (head) training, but so far not implemented
    "ClapAudioConfig": ["num_classes"],
    # Not used, but providing useful information to users
    "SpeechT5HifiGanConfig": ["sampling_rate"],
}

# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
    {
        "CLIPSegConfig": True,
        "DeformableDetrConfig": True,
        "DetaConfig": True,
        "DinatConfig": True,
        "DonutSwinConfig": True,
        "EfficientFormerConfig": True,
        "FSMTConfig": True,
        "JukeboxConfig": True,
        "LayoutLMv2Config": True,
        "MaskFormerSwinConfig": True,
        "MT5Config": True,
        "NatConfig": True,
        "OneFormerConfig": True,
        "PerceiverConfig": True,
        "RagConfig": True,
        "SpeechT5Config": True,
        "SwinConfig": True,
        "Swin2SRConfig": True,
        "Swinv2Config": True,
        "SwitchTransformersConfig": True,
        "TableTransformerConfig": True,
        "TapasConfig": True,
        "TransfoXLConfig": True,
        "UniSpeechConfig": True,
        "UniSpeechSatConfig": True,
        "WavLMConfig": True,
        "WhisperConfig": True,
        # TODO: @Arthur (for `alignment_head` and `alignment_layer`)
        "JukeboxPriorConfig": True,
        # TODO: @Younes (for `is_decoder`)
        "Pix2StructTextConfig": True,
    }
)


def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Return True if any name in `attributes` is referenced by the modeling
    sources, or if it falls under one of the allow-list escape hatches."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed


def check_config_attributes_being_used(config_class):
    """Return the sorted list of `config_class` __init__ parameters that no
    sibling `modeling_*` file appears to use."""
    # Collect the __init__ parameters (minus self/kwargs) and their defaults.
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)


def check_config_attributes():
    """Scan every registered (non-deprecated) config class and raise ValueError
    listing any attributes unused by the matching modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
33
"""Backtracking sudoku solver.

Fix: every function in the original was named ``a_`` (each definition
shadowing the previous one) while ``sudoku`` called ``find_empty_location`` /
``is_safe`` / itself by name, and the grid mutations inside the recursion had
lost their subscript targets. The coherent names and assignments are restored.
"""
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` may be placed at (row, column): it must not
    already appear in that row, that column, or the 3x3 box containing it."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the (row, column) of the first empty cell (value 0) in row-major
    order, or ``None`` when the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking; return the solved grid, or
    ``None`` if no digit assignment satisfies the constraints."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Dead end -- undo the tentative placement and try the next digit.
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """Pretty-print the grid, one row per line."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
676
0
"""simple docstring""" import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class snake_case_ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights UpperCamelCase = FlaxDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=lowerCamelCase_ , cache_dir=lowerCamelCase_) UpperCamelCase = [t[-1] for t in os.walk(os.path.join(lowerCamelCase_ , os.listdir(lowerCamelCase_)[0] , '''snapshots'''))] UpperCamelCase = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('''.bin''') for f in files) @slow @require_flax class snake_case_ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self) -> Any: UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=lowerCamelCase_) UpperCamelCase = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) UpperCamelCase = jax.random.PRNGKey(0) UpperCamelCase = 4 UpperCamelCase = jax.device_count() UpperCamelCase = num_samples * [prompt] UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_) # shard inputs and rng UpperCamelCase = replicate(lowerCamelCase_) UpperCamelCase = 
jax.random.split(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase = shard(lowerCamelCase_) UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images assert images.shape == (num_samples, 1, 6_4, 6_4, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.151_4745) < 1e-3 assert np.abs(np.abs(lowerCamelCase_ , dtype=np.floataa).sum() - 4_9947.875) < 5e-1 UpperCamelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) assert len(lowerCamelCase_) == num_samples def UpperCAmelCase__ ( self) -> Tuple: UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=lowerCamelCase_) UpperCamelCase = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) UpperCamelCase = jax.random.PRNGKey(0) UpperCamelCase = 5_0 UpperCamelCase = jax.device_count() UpperCamelCase = num_samples * [prompt] UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_) # shard inputs and rng UpperCamelCase = replicate(lowerCamelCase_) UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase = shard(lowerCamelCase_) UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0565_2401)) < 1e-3 assert np.abs((np.abs(lowerCamelCase_ , dtype=np.floataa).sum() - 238_3808.2)) < 5e-1 def UpperCAmelCase__ ( self) -> Any: UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , 
safety_checker=lowerCamelCase_) UpperCamelCase = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) UpperCamelCase = jax.random.PRNGKey(0) UpperCamelCase = 5_0 UpperCamelCase = jax.device_count() UpperCamelCase = num_samples * [prompt] UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_) # shard inputs and rng UpperCamelCase = replicate(lowerCamelCase_) UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase = shard(lowerCamelCase_) UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0400_3906)) < 1e-3 assert np.abs((np.abs(lowerCamelCase_ , dtype=np.floataa).sum() - 237_3516.75)) < 5e-1 def UpperCAmelCase__ ( self) -> str: UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa) UpperCamelCase = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) UpperCamelCase = jax.random.PRNGKey(0) UpperCamelCase = 5_0 UpperCamelCase = jax.device_count() UpperCamelCase = num_samples * [prompt] UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_) # shard inputs and rng UpperCamelCase = replicate(lowerCamelCase_) UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase = shard(lowerCamelCase_) UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , 
dtype=np.floataa).sum() - 0.0400_3906)) < 1e-3 assert np.abs((np.abs(lowerCamelCase_ , dtype=np.floataa).sum() - 237_3516.75)) < 5e-1 def UpperCAmelCase__ ( self) -> str: UpperCamelCase = FlaxDDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=lowerCamelCase_ , steps_offset=1 , ) UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=lowerCamelCase_ , safety_checker=lowerCamelCase_ , ) UpperCamelCase = scheduler.create_state() UpperCamelCase = scheduler_state UpperCamelCase = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) UpperCamelCase = jax.random.PRNGKey(0) UpperCamelCase = 5_0 UpperCamelCase = jax.device_count() UpperCamelCase = num_samples * [prompt] UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_) # shard inputs and rng UpperCamelCase = replicate(lowerCamelCase_) UpperCamelCase = jax.random.split(lowerCamelCase_ , lowerCamelCase_) UpperCamelCase = shard(lowerCamelCase_) UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0_4504_3945)) < 1e-3 assert np.abs((np.abs(lowerCamelCase_ , dtype=np.floataa).sum() - 234_7693.5)) < 5e-1 def UpperCAmelCase__ ( self) -> List[Any]: UpperCamelCase = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) UpperCamelCase = jax.device_count() UpperCamelCase = num_samples * [prompt] UpperCamelCase = jax.random.split(jax.random.PRNGKey(0) , lowerCamelCase_) UpperCamelCase , UpperCamelCase = 
FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=lowerCamelCase_ , ) UpperCamelCase = replicate(lowerCamelCase_) UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_) UpperCamelCase = shard(lowerCamelCase_) UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) UpperCamelCase = images[2, 0, 2_5_6, 1_0:1_7, 1] # With memory efficient attention UpperCamelCase , UpperCamelCase = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=lowerCamelCase_ , use_memory_efficient_attention=lowerCamelCase_ , ) UpperCamelCase = replicate(lowerCamelCase_) UpperCamelCase = pipeline.prepare_inputs(lowerCamelCase_) UpperCamelCase = shard(lowerCamelCase_) UpperCamelCase = pipeline(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , jit=lowerCamelCase_).images assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) UpperCamelCase = images[2, 0, 2_5_6, 1_0:1_7, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice).max() < 1e-2
34
"""Informer model configuration."""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """
    Configuration class for an Informer time-series model.

    Fix: the original `__init__` declared every parameter with the same
    placeholder name (a SyntaxError) and bound values to a throwaway local
    instead of the `self.*` attributes that the rest of the class reads
    (`self.cardinality`, `self.lags_sequence`, `self._number_of_features`).
    Parameter names and attribute assignments are restored from those reads.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        # default lags cover one week of daily data
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer-specific attention settings
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        # Total width of the per-step feature vector fed alongside the values.
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
676
0
"""Ideal gas law (PV = nRT) helpers.

Fix: the constant was bound to a throwaway name while both bodies read
``UNIVERSAL_GAS_CONSTANT``; both functions were named ``a`` (the second
shadowing the first) with duplicate ``A__`` parameters while the bodies read
``moles``/``kelvin``/``volume``/``pressure``. Coherent names are restored.
"""

UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure P = nRT / V of an ideal gas.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume V = nRT / P of an ideal gas.

    Raises:
        ValueError: if any input is negative.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
35
"""Project Euler 50: longest sum of consecutive primes below a limit.

Fix: the sieve's index targets had been stripped (three bare rebindings of a
throwaway name), both functions were named ``a_`` (the second shadowing the
first), and the second called the nonexistent ``prime_sieve``. The intended
names and subscript assignments are restored.
"""
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return all primes below ``limit`` (requires ``limit`` >= 3).

    Uses an odd-only sieve: 2 is added manually and only odd indices are
    ever appended.
    """
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        # strike out every multiple of i starting at 2*i
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below ``ceiling`` that is expressible as the sum of the
    most consecutive primes (e.g. 41 = 2+3+5+7+11+13 for ceiling=100)."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        # start at i + length: only runs longer than the current best matter
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
676
0
"""Open the first Google result for a command-line (or prompted) query.

Fix: ``from bsa import BeautifulSoup`` referenced a nonexistent module (the
package is ``bs4``), and the bindings had been collapsed to a placeholder name
while later statements read ``query``, ``res`` and ``link``. Restored.
"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    # Query from argv when given, otherwise prompt; spaces become %20.
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))

    print("Googling.....")

    url = f"https://www.google.com/search?q={query}&num=100"

    # Random User-Agent so the request is less likely to be served a captcha.
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )

    try:
        # Standard result layout: the link lives in a div.yuRUbf anchor.
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        # Fallback layout (div.kCrYT) wraps the target in a redirect URL whose
        # real destination sits in the "url" query parameter.
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]

    webbrowser.open(link)
36
"""Unconditional image-generation pipeline using DDIM sampling."""
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    """
    Pipeline for unconditional image generation with a UNet denoiser and a
    DDIM scheduler.

    Fix: the original bound every local to a placeholder name while later
    statements read `image`, and the sample-size type check compared against a
    placeholder instead of `int` -- both raised at call time. The coherent
    locals are restored.
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate `batch_size` images.

        Args:
            batch_size: Number of images to generate.
            generator: Optional torch generator(s) for reproducible noise.
            eta: DDIM variance weight (0 = deterministic DDIM).
            num_inference_steps: Number of denoising steps.
            use_clipped_model_output: Forwarded to `scheduler.step`.
            output_type: "pil" for PIL images, anything else for a numpy array.
            return_dict: When False, return a plain tuple instead of
                `ImagePipelineOutput`.
        """
        # sample_size may be a single int (square images) or a (h, w) pair
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        # map from [-1, 1] to [0, 1] and move channels last for numpy/PIL
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
676
0
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read the contents of a single compressed file as an archive holding one member."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        """
        Args:
            fo: path/URL of the compressed file.
            target_protocol: protocol of the filesystem that holds `fo`.
            target_options: options forwarded to that filesystem.
        """
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        # Strip the compression extension to derive the name of the single member.
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        # Lazily populate the one-entry directory cache expected by fsspec.
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        # There is exactly one member, so always return its full decompressed content.
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a Zstandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        #   File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        #   AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            # Delegates everything to the wrapped reader while allowing `close` to be rebound.
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
37
"""Find the twin prime of a given prime number."""
from maths.prime_check import is_prime


def a_(number: int) -> int:
    """
    Return the twin prime of ``number``, or -1 if there is none.

    Twin primes differ by exactly 2 (e.g. 5 and 7). If ``number`` and
    ``number + 2`` are both prime, ``number + 2`` is returned.

    Args:
        number: candidate lower twin prime.

    Returns:
        ``number + 2`` when (number, number + 2) are twin primes, else -1.

    Raises:
        TypeError: if ``number`` is not an int.
    """
    # isinstance must test against the *type* int, not against the value itself.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0
"""Manim scene animating how model weights are sharded across CPU, GPU and disk.

NOTE(review): this block appears to have been mechanically obfuscated — nearly
every argument was replaced with the single undefined global name
``__SCREAMING_SNAKE_CASE`` (presumably the original manim direction constants
UP/DOWN/LEFT/RIGHT and local mobjects — TODO confirm against the original
script). As written, constructing the class fails because the base class name
is undefined. The code is reformatted and documented here without altering any
tokens.
"""
from manim import *


class __snake_case ( __SCREAMING_SNAKE_CASE ):
    # presumably subclasses manim's `Scene` — TODO confirm

    def __UpperCamelCase ( self ):
        # Basic building blocks: a memory cell, a smaller "meta" cell, and a fill rect.
        snake_case__ : Tuple = Rectangle(height=0.5 , width=0.5 )
        snake_case__ : Optional[Any] = Rectangle(height=0.25 , width=0.25 )
        snake_case__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )

        # --- CPU block: two columns of 6 cells plus a label ---
        snake_case__ : Optional[int] = [mem.copy() for i in range(6 )]
        snake_case__ : Tuple = [mem.copy() for i in range(6 )]
        snake_case__ : Optional[Any] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        snake_case__ : Optional[Any] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        snake_case__ : Union[str, Any] = VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        snake_case__ : Any = Text("""CPU""" , font_size=2_4 )
        snake_case__ : Dict = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__SCREAMING_SNAKE_CASE )

        # --- GPU block: 4 cells plus a label ---
        snake_case__ : List[str] = [mem.copy() for i in range(4 )]
        snake_case__ : Optional[int] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        snake_case__ : str = Text("""GPU""" , font_size=2_4 )
        snake_case__ : Tuple = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
        gpu.move_to([-1, -1, 0] )
        self.add(__SCREAMING_SNAKE_CASE )

        # --- Model block: 6 cells plus a label ---
        snake_case__ : Tuple = [mem.copy() for i in range(6 )]
        snake_case__ : Union[str, Any] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        snake_case__ : Union[str, Any] = Text("""Model""" , font_size=2_4 )
        snake_case__ : List[Any] = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
        model.move_to([3, -1.0, 0] )
        self.add(__SCREAMING_SNAKE_CASE )

        # Lay a small filled target next to each model cell, chaining off the
        # previously placed target (special-cased for the first and fourth cell).
        snake_case__ : str = []
        snake_case__ : List[str] = []
        snake_case__ : Any = []
        for i, rect in enumerate(__SCREAMING_SNAKE_CASE ):
            rect.set_stroke(__SCREAMING_SNAKE_CASE )
            snake_case__ : Dict = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__SCREAMING_SNAKE_CASE , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__SCREAMING_SNAKE_CASE )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=__SCREAMING_SNAKE_CASE , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=__SCREAMING_SNAKE_CASE , buff=0.0 )
            self.add(__SCREAMING_SNAKE_CASE )
            model_cpu_arr.append(__SCREAMING_SNAKE_CASE )
        self.add(*__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE )

        # --- Loaded checkpoint block: 6 cells plus a label ---
        snake_case__ : Dict = [mem.copy() for i in range(6 )]
        snake_case__ : Optional[Any] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        snake_case__ : List[Any] = Text("""Loaded Checkpoint""" , font_size=2_4 )
        snake_case__ : Any = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(__SCREAMING_SNAKE_CASE )

        # Mirror each checkpoint cell onto the CPU columns (left column first,
        # then the right column for the sixth cell onward).
        snake_case__ : str = []
        snake_case__ : int = []
        for i, rect in enumerate(__SCREAMING_SNAKE_CASE ):
            snake_case__ : List[Any] = fill.copy().set_fill(__SCREAMING_SNAKE_CASE , opacity=0.7 )
            target.move_to(__SCREAMING_SNAKE_CASE )
            ckpt_arr.append(__SCREAMING_SNAKE_CASE )
            snake_case__ : Optional[Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(__SCREAMING_SNAKE_CASE )
        self.add(*__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE )

        # --- Legend ---
        snake_case__ : Any = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        snake_case__ : Tuple = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" ,
            font_size=1_8 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        snake_case__ : int = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint" ,
            font_size=1_8 , )
        blue_text.next_to(__SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(__SCREAMING_SNAKE_CASE )

        # --- First caption + disk block (two columns of 6 "meta" cells) ---
        snake_case__ : List[str] = MarkupText(
            f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." ,
            font_size=2_4 , )
        step_a.move_to([2, 2, 0] )
        snake_case__ : List[Any] = [meta_mem.copy() for i in range(6 )]
        snake_case__ : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
        snake_case__ : Optional[int] = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        snake_case__ : str = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        snake_case__ : int = VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 )
        snake_case__ : str = Text("""Disk""" , font_size=2_4 )
        snake_case__ : Union[str, Any] = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(__SCREAMING_SNAKE_CASE , run_time=3 ) , Write(__SCREAMING_SNAKE_CASE , run_time=1 ) , Create(__SCREAMING_SNAKE_CASE , run_time=1 ) )

        # Animate each cell shrinking onto its slot in the disk's left column.
        snake_case__ : Optional[int] = []
        for i, rect in enumerate(__SCREAMING_SNAKE_CASE ):
            snake_case__ : int = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(__SCREAMING_SNAKE_CASE , run_time=1.5 ) )
        self.play(*__SCREAMING_SNAKE_CASE )
        self.play(FadeOut(__SCREAMING_SNAKE_CASE ) )

        # --- Second caption, then fade everything out ---
        snake_case__ : Tuple = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=2_4 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__SCREAMING_SNAKE_CASE , run_time=3 ) )
        self.play(
            FadeOut(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ) , )
        self.wait()
38
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class SdeVeOutput(BaseOutput):
    """Output of the predictor step: the new sample and its pre-noise mean."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding (VE) SDE scheduler with predictor and corrector steps.

    Config args:
        num_train_timesteps: number of diffusion steps used to train the model.
        snr: signal-to-noise weight for the corrector step size.
        sigma_min / sigma_max: bounds of the sigma schedule.
        sampling_eps: end value of the (reversed) sampling time grid.
        correct_steps: number of corrector steps per predictor step.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2_000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1_3_4_8.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No-op: SDE-VE does not scale the model input."""
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        """Build the continuous time grid from 1 down to `sampling_eps`."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        """Build the geometric sigma schedule matching the current timesteps."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        """Sigma of the previous discrete step (zero at the first step)."""
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        """Predictor step: propagate the sample with the reverse SDE.

        Raises:
            ValueError: if `set_timesteps` was never called.
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """Corrector step: Langevin-style correction after each prediction.

        Raises:
            ValueError: if `set_timesteps` was never called.
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        """Forward-diffuse `original_samples` to the given discrete timesteps."""
        # Make sure sigmas and timesteps have the same device as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
676
0
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image


class BitModelTester:
    """Builds tiny Bit configs/inputs and shared model checks for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit does
    not use input_ids/attention_mask and has no attentions or embeddings.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason='''Bit does not output attentions''')
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason='''Bit does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''Bit does not support input and output embeddings''')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                # Norm layers must start at identity (weight 1, bias 0).
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason='''Bit does not use feedforward chunking''')
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats test image used across vision tests."""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
"""Play FizzBuzz over a range of numbers."""


def a_(number: int, iterations: int) -> str:
    """
    Play FizzBuzz starting at ``number`` up to and including ``iterations``.

    Multiples of 3 emit "Fizz", multiples of 5 emit "Buzz", multiples of both
    emit "FizzBuzz"; every other number is emitted as-is. Tokens are separated
    by single spaces and the result ends with a trailing space.

    Args:
        number: the (positive) starting number.
        iterations: the last number to play.

    Returns:
        The space-separated FizzBuzz transcript.

    Raises:
        ValueError: if either argument is not a positive integer.
    """
    if not isinstance(iterations, int):
        raise ValueError('''iterations must be defined as integers''')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('''starting number must be an integer and be more than 0''')
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''')

    out = ''''''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        # Neither divisible by 3 nor 5: emit the number itself.
        if 0 not in (number % 3, number % 5):
            out += str(number)

        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
0