code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) a_ : Tuple = logging.getLogger(__name__) def a_ ( __snake_case : str ) -> Optional[int]: """simple docstring""" lowerCamelCase_ =git.Repo(search_parent_directories=__snake_case ) lowerCamelCase_ ={ '''repo_id''': str(__snake_case ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), } with open(os.path.join(__snake_case , '''git_log.json''' ) , '''w''' ) as f: json.dump(__snake_case , __snake_case , indent=4 ) def a_ ( __snake_case : str ) -> List[str]: """simple docstring""" if params.n_gpu <= 0: lowerCamelCase_ =0 lowerCamelCase_ =-1 lowerCamelCase_ =True lowerCamelCase_ =False return assert torch.cuda.is_available() logger.info('''Initializing GPUs''' ) if params.n_gpu > 1: assert params.local_rank != -1 lowerCamelCase_ =int(os.environ['''WORLD_SIZE'''] ) lowerCamelCase_ =int(os.environ['''N_GPU_NODE'''] ) lowerCamelCase_ =int(os.environ['''RANK'''] ) # number of nodes / node ID lowerCamelCase_ =params.world_size // params.n_gpu_per_node lowerCamelCase_ =params.global_rank // params.n_gpu_per_node lowerCamelCase_ =True assert params.n_nodes == int(os.environ['''N_NODES'''] ) assert params.node_id == int(os.environ['''NODE_RANK'''] ) # local job (single GPU) else: assert params.local_rank == -1 lowerCamelCase_ =1 lowerCamelCase_ =0 lowerCamelCase_ =0 lowerCamelCase_ =0 lowerCamelCase_ =1 lowerCamelCase_ =1 lowerCamelCase_ =False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode 
lowerCamelCase_ =params.node_id == 0 and params.local_rank == 0 lowerCamelCase_ =params.n_nodes > 1 # summary lowerCamelCase_ =F'''--- Global rank: {params.global_rank} - ''' logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes ) logger.info(PREFIX + '''Node ID : %i''' % params.node_id ) logger.info(PREFIX + '''Local rank : %i''' % params.local_rank ) logger.info(PREFIX + '''World size : %i''' % params.world_size ) logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node ) logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) ) logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) ) logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) ) logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('''Initializing PyTorch distributed''' ) torch.distributed.init_process_group( init_method='''env://''' , backend='''nccl''' , ) def a_ ( __snake_case : List[str] ) -> str: """simple docstring""" np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
676
'''simple docstring''' # DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin, SchedulerOutput @dataclass class __UpperCamelCase ( lowerCamelCase__ ): lowercase : torch.FloatTensor lowercase : torch.FloatTensor class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ): lowercase : Tuple =1 @register_to_config def __init__( self, lowerCAmelCase = 2_000, lowerCAmelCase = 0.1_5, lowerCAmelCase = 0.0_1, lowerCAmelCase = 1_3_4_8.0, lowerCAmelCase = 1e-5, lowerCAmelCase = 1, ): """simple docstring""" lowerCamelCase_ =sigma_max # setable values lowerCamelCase_ =None self.set_sigmas(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" return sample def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None ): """simple docstring""" lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps lowerCamelCase_ =torch.linspace(1, lowerCAmelCase, lowerCAmelCase, device=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None ): """simple docstring""" lowerCamelCase_ =sigma_min if sigma_min is not None else self.config.sigma_min lowerCamelCase_ =sigma_max if sigma_max is not None else self.config.sigma_max lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps if self.timesteps is None: self.set_timesteps(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps) lowerCamelCase_ =torch.exp(torch.linspace(math.log(lowerCAmelCase ), math.log(lowerCAmelCase ), 
lowerCAmelCase ) ) lowerCamelCase_ =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" return torch.where( timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ): """simple docstring""" if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) lowerCamelCase_ =timestep * torch.ones( sample.shape[0], device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0]) lowerCamelCase_ =(timestep * (len(self.timesteps ) - 1)).long() # mps requires indices to be in the same device, so we use cpu as is the default with cuda lowerCamelCase_ =timesteps.to(self.discrete_sigmas.device ) lowerCamelCase_ =self.discrete_sigmas[timesteps].to(sample.device ) lowerCamelCase_ =self.get_adjacent_sigma(lowerCAmelCase, lowerCAmelCase ).to(sample.device ) lowerCamelCase_ =torch.zeros_like(lowerCAmelCase ) lowerCamelCase_ =(sigma**2 - adjacent_sigma**2) ** 0.5 # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x) # also equation 47 shows the analog from SDE models to ancestral sampling methods lowerCamelCase_ =diffusion.flatten() while len(diffusion.shape ) < len(sample.shape ): lowerCamelCase_ =diffusion.unsqueeze(-1 ) lowerCamelCase_ =drift - diffusion**2 * model_output # equation 6: sample noise for the diffusion term of lowerCamelCase_ =randn_tensor( sample.shape, layout=sample.layout, generator=lowerCAmelCase, device=sample.device, dtype=sample.dtype ) lowerCamelCase_ =sample - drift # subtract because `dt` is a small negative timestep # TODO is the variable diffusion the correct scaling term for the noise? 
lowerCamelCase_ =prev_sample_mean + diffusion * noise # add impact of diffusion field g if not return_dict: return (prev_sample, prev_sample_mean) return SdeVeOutput(prev_sample=lowerCAmelCase, prev_sample_mean=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ): """simple docstring""" if self.timesteps is None: raise ValueError( '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' ) # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z" # sample noise for correction lowerCamelCase_ =randn_tensor(sample.shape, layout=sample.layout, generator=lowerCAmelCase ).to(sample.device ) # compute step size from the model_output, the noise, and the snr lowerCamelCase_ =torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean() lowerCamelCase_ =torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean() lowerCamelCase_ =(self.config.snr * noise_norm / grad_norm) ** 2 * 2 lowerCamelCase_ =step_size * torch.ones(sample.shape[0] ).to(sample.device ) # self.repeat_scalar(step_size, sample.shape[0]) # compute corrected sample: model_output term and noise term lowerCamelCase_ =step_size.flatten() while len(step_size.shape ) < len(sample.shape ): lowerCamelCase_ =step_size.unsqueeze(-1 ) lowerCamelCase_ =sample + step_size * model_output lowerCamelCase_ =prev_sample_mean + ((step_size * 2) ** 0.5) * noise if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =timesteps.to(original_samples.device ) lowerCamelCase_ =self.discrete_sigmas.to(original_samples.device )[timesteps] lowerCamelCase_ =( noise * sigmas[:, None, None, None] if noise is not None else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None] ) lowerCamelCase_ =noise + 
original_samples return noisy_samples def __len__( self ): """simple docstring""" return self.config.num_train_timesteps
676
1
'''simple docstring''' import math import random def a_ ( __snake_case : float , __snake_case : bool = False ) -> float: """simple docstring""" if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value a_ : Tuple = 0.02 def a_ ( __snake_case : int , __snake_case : int ) -> float: """simple docstring""" lowerCamelCase_ =float(2 * (random.randint(1 , 100 )) - 1 ) for _ in range(__snake_case ): # Forward propagation lowerCamelCase_ =sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? lowerCamelCase_ =(expected / 100) - layer_a # Error delta lowerCamelCase_ =layer_1_error * sigmoid_function(__snake_case , __snake_case ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 100 if __name__ == "__main__": import doctest doctest.testmod() a_ : Optional[int] = int(input("""Expected value: """)) a_ : Union[str, Any] = int(input("""Number of propagations: """)) print(forward_propagation(expected, number_propagations))
676
'''simple docstring''' def a_ ( __snake_case : int , __snake_case : int ) -> str: """simple docstring""" if not isinstance(__snake_case , __snake_case ): raise ValueError('''iterations must be defined as integers''' ) if not isinstance(__snake_case , __snake_case ) or not number >= 1: raise ValueError( '''starting number must be and integer and be more than 0''' ) if not iterations >= 1: raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' ) lowerCamelCase_ ='''''' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__snake_case ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
676
1
'''simple docstring''' def a_ ( __snake_case : Optional[Any] , __snake_case : List[Any] ) -> List[Any]: """simple docstring""" print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' ) for i in range(__snake_case ): for j in range(__snake_case ): if dist[i][j] != float('''inf''' ): print(int(dist[i][j] ) , end='''\t''' ) else: print('''INF''' , end='''\t''' ) print() def a_ ( __snake_case : int , __snake_case : Any ) -> Any: """simple docstring""" lowerCamelCase_ =[[float('''inf''' ) for _ in range(__snake_case )] for _ in range(__snake_case )] for i in range(__snake_case ): for j in range(__snake_case ): lowerCamelCase_ =graph[i][j] # check vertex k against all other vertices (i, j) for k in range(__snake_case ): # looping through rows of graph array for i in range(__snake_case ): # looping through columns of graph array for j in range(__snake_case ): if ( dist[i][k] != float('''inf''' ) and dist[k][j] != float('''inf''' ) and dist[i][k] + dist[k][j] < dist[i][j] ): lowerCamelCase_ =dist[i][k] + dist[k][j] _print_dist(__snake_case , __snake_case ) return dist, v if __name__ == "__main__": a_ : Optional[Any] = int(input("""Enter number of vertices: """)) a_ : List[Any] = int(input("""Enter number of edges: """)) a_ : str = [[float("""inf""") for i in range(v)] for j in range(v)] for i in range(v): a_ : str = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print("""\nEdge """, i + 1) a_ : List[Any] = int(input("""Enter source:""")) a_ : Union[str, Any] = int(input("""Enter destination:""")) a_ : str = float(input("""Enter weight:""")) a_ : Union[str, Any] = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight 
for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
676
'''simple docstring''' from typing import List import numpy as np def a_ ( __snake_case : dict ) -> int: """simple docstring""" lowerCamelCase_ ={key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) lowerCamelCase_ =max(lists_lengths.values() , default=0 ) return max(1 , __snake_case ) def a_ ( __snake_case : int , __snake_case : int ) -> List[range]: """simple docstring""" lowerCamelCase_ =[] for group_idx in range(__snake_case ): lowerCamelCase_ =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowerCamelCase_ =shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowerCamelCase_ =range(__snake_case , start + num_shards_to_add ) shards_indices_per_group.append(__snake_case ) return shards_indices_per_group def a_ ( __snake_case : dict , __snake_case : int ) -> List[dict]: """simple docstring""" lowerCamelCase_ =_number_of_shards_in_gen_kwargs(__snake_case ) if num_shards == 1: return [dict(__snake_case )] else: lowerCamelCase_ =_distribute_shards(num_shards=__snake_case , max_num_jobs=__snake_case ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__snake_case , __snake_case ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__snake_case ) ) ] def a_ ( __snake_case : List[dict] ) -> dict: """simple docstring""" 
return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __snake_case ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def a_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> dict: """simple docstring""" lowerCamelCase_ ={len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )} lowerCamelCase_ ={} for size in list_sizes: lowerCamelCase_ =list(range(__snake_case ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowerCamelCase_ =dict(__snake_case ) for key, value in shuffled_kwargs.items(): if isinstance(__snake_case , __snake_case ): lowerCamelCase_ =[value[i] for i in indices_per_size[len(__snake_case )]] return shuffled_kwargs
676
1
'''simple docstring''' # A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def a_ ( __snake_case : int ) -> str: """simple docstring""" lowerCamelCase_ =[False] * len(__snake_case ) lowerCamelCase_ =[-1] * len(__snake_case ) def dfs(__snake_case : Union[str, Any] , __snake_case : Dict ): lowerCamelCase_ =True lowerCamelCase_ =c for u in graph[v]: if not visited[u]: dfs(__snake_case , 1 - c ) for i in range(len(__snake_case ) ): if not visited[i]: dfs(__snake_case , 0 ) for i in range(len(__snake_case ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph a_ : Optional[int] = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
676
'''simple docstring''' import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) a_ : int = logging.getLogger(__name__) def a_ ( ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ =argparse.ArgumentParser( description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' ) parser.add_argument('''--file_path''' , type=__snake_case , default='''data/dump.txt''' , help='''The path to the data.''' ) parser.add_argument('''--tokenizer_type''' , type=__snake_case , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] ) parser.add_argument('''--tokenizer_name''' , type=__snake_case , default='''bert-base-uncased''' , help='''The tokenizer to use.''' ) parser.add_argument('''--dump_file''' , type=__snake_case , default='''data/dump''' , help='''The dump file prefix.''' ) lowerCamelCase_ =parser.parse_args() logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' ) if args.tokenizer_type == "bert": lowerCamelCase_ =BertTokenizer.from_pretrained(args.tokenizer_name ) lowerCamelCase_ =tokenizer.special_tokens_map['''cls_token'''] # `[CLS]` lowerCamelCase_ =tokenizer.special_tokens_map['''sep_token'''] # `[SEP]` elif args.tokenizer_type == "roberta": lowerCamelCase_ =RobertaTokenizer.from_pretrained(args.tokenizer_name ) lowerCamelCase_ =tokenizer.special_tokens_map['''cls_token'''] # `<s>` lowerCamelCase_ =tokenizer.special_tokens_map['''sep_token'''] # `</s>` elif args.tokenizer_type == "gpt2": lowerCamelCase_ =GPTaTokenizer.from_pretrained(args.tokenizer_name ) lowerCamelCase_ =tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>` lowerCamelCase_ =tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>` logger.info(F'''Loading text from 
{args.file_path}''' ) with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp: lowerCamelCase_ =fp.readlines() logger.info('''Start encoding''' ) logger.info(F'''{len(__snake_case )} examples to process.''' ) lowerCamelCase_ =[] lowerCamelCase_ =0 lowerCamelCase_ =1_0000 lowerCamelCase_ =time.time() for text in data: lowerCamelCase_ =F'''{bos} {text.strip()} {sep}''' lowerCamelCase_ =tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) rslt.append(__snake_case ) iter += 1 if iter % interval == 0: lowerCamelCase_ =time.time() logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' ) lowerCamelCase_ =time.time() logger.info('''Finished binarization''' ) logger.info(F'''{len(__snake_case )} examples processed.''' ) lowerCamelCase_ =F'''{args.dump_file}.{args.tokenizer_name}.pickle''' lowerCamelCase_ =tokenizer.vocab_size if vocab_size < (1 << 16): lowerCamelCase_ =[np.uintaa(__snake_case ) for d in rslt] else: lowerCamelCase_ =[np.intaa(__snake_case ) for d in rslt] random.shuffle(rslt_ ) logger.info(F'''Dump to {dp_file}''' ) with open(__snake_case , '''wb''' ) as handle: pickle.dump(rslt_ , __snake_case , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
676
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class __UpperCamelCase ( unittest.TestCase ): @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' ) lowerCamelCase_ =tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]], dtype=tf.intaa, ) # J'aime le camembert !" lowerCamelCase_ =model(lowerCAmelCase )['''last_hidden_state'''] lowerCamelCase_ =tf.TensorShape((1, 10, 768) ) self.assertEqual(output.shape, lowerCAmelCase ) # compare the actual values for a slice. lowerCamelCase_ =tf.convert_to_tensor( [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]], dtype=tf.floataa, ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4 ) )
676
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : str = logging.get_logger(__name__) a_ : int = { """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""", } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[str] ='mvp' lowercase : List[str] =['past_key_values'] lowercase : Dict ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self, lowerCAmelCase=50_267, lowerCAmelCase=1_024, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase="gelu", lowerCAmelCase=1_024, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase=True, lowerCAmelCase=2, lowerCAmelCase=2, lowerCAmelCase=False, lowerCAmelCase=100, lowerCAmelCase=800, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =vocab_size lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =d_model lowerCamelCase_ =encoder_ffn_dim lowerCamelCase_ =encoder_layers lowerCamelCase_ =encoder_attention_heads lowerCamelCase_ =decoder_ffn_dim lowerCamelCase_ =decoder_layers lowerCamelCase_ =decoder_attention_heads lowerCamelCase_ =dropout lowerCamelCase_ =attention_dropout lowerCamelCase_ =activation_dropout lowerCamelCase_ =activation_function lowerCamelCase_ =init_std lowerCamelCase_ =encoder_layerdrop lowerCamelCase_ =decoder_layerdrop lowerCamelCase_ =classifier_dropout lowerCamelCase_ =use_cache lowerCamelCase_ =encoder_layers lowerCamelCase_ =scale_embedding # scale factor will be sqrt(d_model) if True lowerCamelCase_ =use_prompt lowerCamelCase_ =prompt_length lowerCamelCase_ =prompt_mid_dim super().__init__( pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, 
eos_token_id=lowerCAmelCase, is_encoder_decoder=lowerCAmelCase, decoder_start_token_id=lowerCAmelCase, forced_eos_token_id=lowerCAmelCase, **lowerCAmelCase, ) if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''', lowerCAmelCase ): lowerCamelCase_ =self.bos_token_id warnings.warn( f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ''' '''The config can simply be saved and uploaded again to be fixed.''' )
676
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import _LazyModule a_ : List[Any] = {"""processing_wav2vec2_with_lm""": ["""Wav2Vec2ProcessorWithLM"""]} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys a_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
676
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a_ : int = logging.get_logger(__name__) a_ : str = {"""vocab_file""": """spiece.model"""} a_ : Optional[int] = { """vocab_file""": { """bert_for_seq_generation""": ( """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model""" ), } } a_ : List[Any] = {"""bert_for_seq_generation""": 5_12} class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] =VOCAB_FILES_NAMES lowercase : Optional[int] =PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[int] =[] lowercase : str =['input_ids', 'attention_mask'] def __init__( self, lowerCAmelCase, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<::::>", lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, pad_token=lowerCAmelCase, sep_token=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, ) lowerCamelCase_ =vocab_file lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return self.sp_model.get_piece_size() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" lowerCamelCase_ =self.__dict__.copy() lowerCamelCase_ =None return state def __setstate__( self, lowerCAmelCase ): """simple docstring""" 
lowerCamelCase_ =d # for backward compatibility if not hasattr(self, '''sp_model_kwargs''' ): lowerCamelCase_ ={} lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self.sp_model.piece_to_id(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.sp_model.IdToPiece(lowerCAmelCase ) return token def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ ='''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowerCAmelCase ) + token lowerCamelCase_ =[] else: current_sub_tokens.append(lowerCAmelCase ) out_string += self.sp_model.decode(lowerCAmelCase ) return out_string.strip() def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" if not os.path.isdir(lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase_ =os.path.join( lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase, '''wb''' ) as fi: lowerCamelCase_ =self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase ) return (out_vocab_file,)
676
1
"""Convert a TF Pegasus checkpoint to a HuggingFace PyTorch checkpoint.

De-obfuscated: local/function names restored from the surviving call sites
(`rename_state_dict_key`, `convert_pegasus`, `get_tf_weights_as_numpy`,
`convert_pegasus_ckpt_to_pytorch`); the previous dump collapsed every local
to one identifier, making the script non-functional.
"""
import argparse
import os
from pathlib import Path
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params


PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k):
    """Map a TF Pegasus variable name to the matching HF state-dict key."""
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration and load TF weights into it.

    Args:
        tf_weights: mapping of TF variable name -> numpy array.
        cfg_updates: task-specific overrides merged on top of ``DEFAULTS``.

    Raises:
        ValueError: if a renamed TF key has no counterpart in the torch state dict.
    """
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        # TF stores dense/projection kernels transposed relative to torch nn.Linear.
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    # Biases absent from the TF checkpoint are zero-initialised.
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    # Sinusoidal position embeddings are recomputed, so they may legitimately be missing.
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    """Load all trainable variables from a TF checkpoint as a name->numpy dict."""
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]  # optimizer slots / counters: not model weights
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    """Convert one Pegasus TF checkpoint and save tokenizer + model under ``save_dir``."""
    # save tokenizer first; the dataset name is encoded in the checkpoint's parent dir
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    # position embeddings are deterministic (sinusoidal); drop them from the saved file
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
676
"""Polynomial evaluation: naive power sum vs. Horner's scheme.

Coefficients are given in ascending-power order: ``poly[i]`` multiplies ``x**i``.
De-obfuscated: names restored from the ``__main__`` call sites; the dump had
collapsed all locals/functions to single identifiers.
"""
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial at ``x`` by summing ``c_i * x**i``.

    O(n) terms but each power is recomputed; kept as the naive reference
    implementation against which ``horner`` can be checked.
    Returns 0 for an empty coefficient sequence.
    """
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial at ``x`` with Horner's method.

    Folds from the highest coefficient down, using one multiply and one add
    per coefficient: p(x) = (...((c_n * x + c_{n-1}) * x + ...) * x + c_0).
    Returns 0.0 for an empty coefficient sequence.
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
676
1
"""Movement-pruning "bertarize" script: fold learned pruning masks into weights.

De-obfuscated: locals restored (every name had been collapsed to one
identifier, breaking the script). Loads a fine-pruned checkpoint, binarizes
each layer's mask scores according to the chosen pruning method, multiplies
the mask into the weight tensor, and saves a standalone pruned model.
"""
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    """Prune ``args.model_name_or_path`` and save the result.

    Args:
        args: argparse namespace with ``pruning_method``, ``threshold``,
            ``model_name_or_path`` and ``target_model_path``.

    Raises:
        ValueError: for an unknown ``pruning_method``.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        # Embeddings / LayerNorm / pooler / heads / biases are never pruned — copy as-is.
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue  # raw scores are consumed below, not saved
                prefix_ = name[:-6]  # strip trailing "weight"
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch interval from the L0 regularization paper.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()
    main(args)
676
"""AltCLIP processor: wraps a CLIP image processor and an XLM-Roberta tokenizer.

De-obfuscated: class/base/local names restored (class name inferred from the
CLIPImageProcessor + XLMRobertaTokenizer combination — the AltCLIP pairing;
the dump's name collapse had also destroyed the ``pixel_values`` assignment).
"""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    """Bundles image preprocessing and text tokenization into one callable."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # `feature_extractor` is the legacy name for `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one is required.

        Returns a BatchEncoding with token fields, `pixel_values`, or both.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, order-preserving and deduplicated.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
676
1
'''simple docstring''' from __future__ import annotations def a_ ( __snake_case : int ) -> list[int]: """simple docstring""" lowerCamelCase_ =[True] * limit lowerCamelCase_ =False lowerCamelCase_ =False lowerCamelCase_ =True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): lowerCamelCase_ =i * 2 while index < limit: lowerCamelCase_ =False lowerCamelCase_ =index + i lowerCamelCase_ =[2] for i in range(3 , __snake_case , 2 ): if is_prime[i]: primes.append(__snake_case ) return primes def a_ ( __snake_case : int = 100_0000 ) -> int: """simple docstring""" lowerCamelCase_ =prime_sieve(__snake_case ) lowerCamelCase_ =0 lowerCamelCase_ =0 for i in range(len(__snake_case ) ): for j in range(i + length , len(__snake_case ) ): lowerCamelCase_ =sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: lowerCamelCase_ =j - i lowerCamelCase_ =sol return largest if __name__ == "__main__": print(F"""{solution() = }""")
676
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING a_ : Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase__ ) class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" super().__init__(*lowerCAmelCase, **lowerCAmelCase ) requires_backends(self, '''vision''' ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING ) def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None ): """simple docstring""" lowerCamelCase_ ={} lowerCamelCase_ ={} if prompt is not None: lowerCamelCase_ =prompt if generate_kwargs is not None: lowerCamelCase_ =generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: lowerCamelCase_ ={} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,''' ''' please use only one''' ) lowerCamelCase_ =max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return super().__call__(lowerCAmelCase, **lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ): """simple docstring""" lowerCamelCase_ =load_image(lowerCAmelCase ) if prompt is not None: if not isinstance(lowerCAmelCase, lowerCAmelCase ): raise ValueError( f'''Received an invalid text input, 
got - {type(lowerCAmelCase )} - but expected a single string. ''' '''Note also that one single text can be provided for conditional image to text generation.''' ) lowerCamelCase_ =self.model.config.model_type if model_type == "git": lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework ) lowerCamelCase_ =self.tokenizer(text=lowerCAmelCase, add_special_tokens=lowerCAmelCase ).input_ids lowerCamelCase_ =[self.tokenizer.cls_token_id] + input_ids lowerCamelCase_ =torch.tensor(lowerCAmelCase ).unsqueeze(0 ) model_inputs.update({'''input_ids''': input_ids} ) elif model_type == "pix2struct": lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, header_text=lowerCAmelCase, return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework ) lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework ) model_inputs.update(lowerCAmelCase ) else: raise ValueError(f'''Model type {model_type} does not support conditional text generation''' ) else: lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: lowerCamelCase_ =None return model_inputs def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ): """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs['''input_ids'''], lowerCAmelCase ) and all(x is None for x in model_inputs['''input_ids'''] ) ): lowerCamelCase_ =None if generate_kwargs is None: lowerCamelCase_ ={} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. 
In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. lowerCamelCase_ =model_inputs.pop(self.model.main_input_name ) lowerCamelCase_ =self.model.generate(lowerCAmelCase, **lowerCAmelCase, **lowerCAmelCase ) return model_outputs def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[] for output_ids in model_outputs: lowerCamelCase_ ={ '''generated_text''': self.tokenizer.decode( lowerCAmelCase, skip_special_tokens=lowerCAmelCase, ) } records.append(lowerCAmelCase ) return records
676
1
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# De-obfuscated: identifiers restored from call sites (`write_basic_config` was
# called in the command handler); the dump had collapsed all names, breaking it.
from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter


description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Write a minimal Accelerate cluster config based on detected hardware.

    Args:
        mixed_precision: one of "no", "fp16", "bf16", "fp8" (case-insensitive).
        save_location: target path; an existing file is never overwritten.
        use_xpu: opt in to Intel XPU detection.

    Returns:
        The config ``Path`` on success, ``False`` if the file already existed.

    Raises:
        ValueError: for an unsupported ``mixed_precision`` value.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    # Hardware detection order: CUDA, then XPU (opt-in), then NPU, else CPU.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    """Register the `accelerate config default` subcommand on ``parser``."""
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    """Entry point for the subcommand: write the config and report its location."""
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
676
"""Convert a TensorFlow BERT checkpoint to a PyTorch ``state_dict`` file.

De-obfuscated: function/local names restored from the ``__main__`` call site
(`convert_tf_checkpoint_to_pytorch`); the dump's name collapse broke the script.
"""
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Build a BertForPreTraining from ``bert_config_file``, load the TF
    checkpoint weights into it, and save the state dict to ``pytorch_dump_path``.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
676
1
"""Word-break: can a string be segmented into words from a dictionary?

Builds a trie over the dictionary, then runs a memoized top-down DP over
string positions. De-obfuscated: names restored; the dump had collapsed all
identifiers, making the code non-functional.
"""
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    """Return True if ``string`` can be split into a sequence of ``words``.

    Args:
        string: non-empty string to segment.
        words: list of non-empty candidate words (reuse allowed).

    Raises:
        ValueError: if ``string`` is empty/not a string, or ``words`` is not
            a list of non-empty strings.

    >>> word_break("applepenapple", ["apple", "pen"])
    True
    >>> word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])
    False
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")
    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie; a sentinel key marks "a word ends here".
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method: memoized on the start index, so each suffix
    # is decided at most once — O(len(string) * longest_word) trie walks total.
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True  # consumed the whole string
        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False  # no dictionary word continues with this prefix
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : Union[str, Any] = logging.get_logger(__name__) a_ : Optional[int] = { """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""", # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[Any] ='altclip_text_model' def __init__( self, lowerCAmelCase=250_002, lowerCAmelCase=1_024, lowerCAmelCase=24, lowerCAmelCase=16, lowerCAmelCase=4_096, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=514, lowerCAmelCase=1, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-05, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase="absolute", lowerCAmelCase=True, lowerCAmelCase=768, **lowerCAmelCase, ): """simple docstring""" super().__init__(pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase ) lowerCamelCase_ =vocab_size lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =hidden_act lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =attention_probs_dropout_prob lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =type_vocab_size lowerCamelCase_ =initializer_range lowerCamelCase_ =initializer_factor lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =position_embedding_type lowerCamelCase_ =use_cache lowerCamelCase_ =project_dim class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Dict ='altclip_vision_model' def __init__( self, lowerCAmelCase=768, lowerCAmelCase=3_072, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3, lowerCAmelCase=224, lowerCAmelCase=32, lowerCAmelCase="quick_gelu", lowerCAmelCase=1e-5, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1.0, 
**lowerCAmelCase, ): """simple docstring""" super().__init__(**lowerCAmelCase ) lowerCamelCase_ =hidden_size lowerCamelCase_ =intermediate_size lowerCamelCase_ =projection_dim lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =num_channels lowerCamelCase_ =patch_size lowerCamelCase_ =image_size lowerCamelCase_ =initializer_range lowerCamelCase_ =initializer_factor lowerCamelCase_ =attention_dropout lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =hidden_act @classmethod def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" cls._set_token_in_kwargs(lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =cls.get_config_dict(lowerCAmelCase, **lowerCAmelCase ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('''model_type''' ) == "altclip": lowerCamelCase_ =config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCAmelCase, **lowerCAmelCase ) class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Dict ='altclip' lowercase : str =True def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=768, lowerCAmelCase=2.6_5_9_2, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =kwargs.pop('''text_config_dict''', lowerCAmelCase ) lowerCamelCase_ =kwargs.pop('''vision_config_dict''', lowerCAmelCase ) super().__init__(**lowerCAmelCase ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. 
The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: lowerCamelCase_ ={} # This is the complete result when using `text_config_dict`. lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: lowerCamelCase_ =( f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. ''' f'''The value `text_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: lowerCamelCase_ =( f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ''' f'''value `text_config["{key}"]` will be overriden.''' ) logger.warning(lowerCAmelCase ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: lowerCamelCase_ ={} # This is the complete result when using `vision_config_dict`. lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: lowerCamelCase_ ={ str(lowerCAmelCase ): value for key, value in _vision_config_dict['''id2label'''].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. 
for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: lowerCamelCase_ =( f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different ''' f'''values. The value `vision_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: lowerCamelCase_ =( f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ''' f'''The value `vision_config["{key}"]` will be overriden.''' ) logger.warning(lowerCAmelCase ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: lowerCamelCase_ ={} logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' ) if vision_config is None: lowerCamelCase_ ={} logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' ) lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase ) lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase ) lowerCamelCase_ =projection_dim lowerCamelCase_ =logit_scale_init_value lowerCamelCase_ =1.0 @classmethod def lowercase__ ( cls, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =copy.deepcopy(self.__dict__ ) lowerCamelCase_ =self.text_config.to_dict() lowerCamelCase_ =self.vision_config.to_dict() lowerCamelCase_ =self.__class__.model_type return output
676
1
"""TrOCR decoder configuration.

De-obfuscated: parameter/attribute names restored (the attribute_map and
default values survived the dump intact and pin the original signature).
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    """Configuration for the TrOCR text decoder.

    Stores decoder hyper-parameters (layer/head counts, dropouts, embedding
    options); common attribute names are remapped onto the decoder-specific
    fields via ``attribute_map``.
    """

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
676
"""Tests for the PyTorch Flaubert model.

Contains:
  * ``FlaubertModelTester`` — builds a tiny config plus random inputs/labels,
  * ``FlaubertModelTest`` — the common ModelTesterMixin/PipelineTesterMixin suite,
  * ``FlaubertModelIntegrationTest`` — a slow test against the pretrained checkpoint.

Fixes vs. previous revision: the three classes were all named ``__UpperCamelCase``
(so the later definitions shadowed the earlier ones), the tester's methods were all
named ``lowercase__`` (so ``self.model_tester.create_and_check_*`` raised
AttributeError), and ``setUp`` referenced the then-nonexistent ``FlaubertModelTester``.
Real names are restored so the in-file references resolve again.
"""

import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class FlaubertModelTester(object):
    """Builds a tiny Flaubert config and random inputs, and runs per-head checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, lengths, and the label tensors)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        # unlabelled forward: beam-search style top-k outputs
        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # duplicate the batch along a new `num_choices` axis
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
676
1
"""Preprocess a raw text dump into pickled token-id sequences, so that
(tokenization + token_to_ids) is not re-done on every training run.

Fixes vs. previous revision:
  * the entry point was named ``a_`` while the ``__main__`` guard called ``main()``
    (NameError) — restored the ``main`` name;
  * the module logger was bound to ``a_`` while the body used ``logger`` — restored;
  * ``GPTaTokenizer`` is not a transformers export — corrected to ``GPT2Tokenizer``.
"""

import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    """Parse CLI args, wrap and tokenize every line of `--file_path`, and pickle the ids.

    Each line is framed as ``{bos} {line} {sep}`` before encoding; special tokens are
    added manually, so ``add_special_tokens=False`` is passed to ``tokenizer.encode``.
    Token ids are stored as ``np.uint16`` when the vocabulary fits in 16 bits,
    ``np.int32`` otherwise, and the examples are shuffled before dumping.
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    # `iter` renamed to avoid shadowing the builtin
    iter_count = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter_count += 1
        if iter_count % interval == 0:
            end = time.time()
            logger.info(f"{iter_count} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(rslt)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
676
"""Utilities to convert model weights between PyTorch state dicts and Flax param trees.

Fixes vs. previous revision: every function was named ``a_`` (each definition
shadowing the previous one) while bodies called the real names
(``convert_pytorch_state_dict_to_flax``, ``rename_key_and_reshape_tensor``,
``load_flax_weights_in_pytorch_model``, ...), so the module raised NameError at
runtime. Also repaired a broken lambda (parameter/body name mismatch) and the
mangled dtype names ``bfloataa``/``floataa``, and hoisted the loop-invariant
``special_pt_names`` computation out of the per-weight loop.
"""

import os
from pickle import UnpicklingError
from typing import Dict, Tuple

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict

import transformers

from .utils import logging


logger = logging.get_logger(__name__)


def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load a PyTorch checkpoint (single file or shard list) into a Flax state dict."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict


def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename a PT weight key to the corresponding Flax key, reshaping if necessary."""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Check whether `key` or `(model_prefix,) + key` exists in the random Flax params."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PT (out, in, kh, kw) -> Flax (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: PT (out, in) -> Flax (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    """Convert a loaded (unsharded) PyTorch state dict into a nested Flax param dict."""
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)

        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)


def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    """Convert a sharded PyTorch checkpoint (list of .pt files) into a Flax param dict."""
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)


def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load a serialized Flax checkpoint file into a PyTorch model."""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load a (deserialized) Flax param tree into a PyTorch model in place."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
    # Computed once here (loop-invariant: the key set of `pt_model_dict` is never changed below).
    special_pt_names = {}
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    for key in pt_model_dict:
        key_components = key.split(".")
        name = None
        if key_components[-3::2] == ["parametrizations", "original0"]:
            name = key_components[-2] + "_g"
        elif key_components[-3::2] == ["parametrizations", "original1"]:
            name = key_components[-2] + "_v"
        if name is not None:
            key_components = key_components[:-3] + [name]
            key_to_check = ".".join(key_components)
            special_pt_names[key_to_check] = key

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model
676
1
"""Convert a T5X Switch-Transformers checkpoint to sharded PyTorch .bin files,
reading each layer lazily through tensorstore so the whole model never sits in RAM.

Fixes vs. previous revision: every function was named ``a_`` (shadowing each
other) while call sites used the real names (``get_key_and_tensorstore_dict``,
``rename_and_save_block``, ``rename_base_flax_keys``, ``shard_on_the_fly``);
``args.switch_tax_checkpoint_path`` did not exist (argparse derives the dest
``switch_t5x_checkpoint_path`` from ``--switch_t5x_checkpoint_path``); and
``TaTokenizer`` is not a transformers export (``T5Tokenizer``).
"""

import argparse
import json
import os

import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile

from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
    rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int


def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-process a flattened Flax key/tensor pair into PyTorch naming and layout."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer: (experts, in, out) -> (experts, out, in)
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        # NOTE(review): `".".join(...)` is always truthy for a non-empty key tuple, so this
        # branch catches every remaining "kernel" — kept as-is to preserve behavior.
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened checkpoint key into (real layer name, sub-key tuple, content).

    ``kvstore/path`` entries are rewritten to absolute paths under
    `switch_checkpoint_path`, and ``kvstore/driver`` is forced to the ``file`` driver.
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    """Apply the model-specific key renaming, flatten '/' to '.', and torch.save the block."""
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """Stream the T5X checkpoint layer by layer into PyTorch shards of at most `max_shard_size`.

    Returns ``(metadata, index)``; when a single shard suffices, returns
    ``({weights_name: keys}, None)`` instead.
    """
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        # fixed: previously read `args.switch_tax_checkpoint_path`, which argparse never defines
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def sanity_check():
    """Manual smoke test: convert switch-base-8, reload it, and generate one sample."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
676
"""Interleave two strings character by character, e.g. "AB" + "XYZ" -> "AXBYZ"."""


def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Return the characters of ``first_str`` and ``second_str`` alternated.

    Leftover characters of the longer string are appended unchanged at the end.

    >>> alternative_string_arrange("AB", "XYZ")
    'AXBYZ'
    """
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    # Walk up to the longer of the two lengths so no character is dropped.
    abs_length = max(first_str_length, second_str_length)
    output_list: list[str] = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
676
1
"""Project Euler problem 8: largest product of 13 adjacent digits in a 1000-digit number."""
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of 13 consecutive digits in the digit string ``n``."""
    # Start below any achievable product so the first window always wins.
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
676
"""Lazy import structure for the TimmBackbone model."""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# submodule name -> public names, consumed by _LazyModule below.
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]


if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules import on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
1
'''simple docstring''' # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys a_ : str = """3""" print("""Python version:""", sys.version) print("""OS platform:""", platform.platform()) print("""OS architecture:""", platform.machine()) try: import torch print("""Torch version:""", torch.__version__) print("""Cuda available:""", torch.cuda.is_available()) print("""Cuda version:""", torch.version.cuda) print("""CuDNN version:""", torch.backends.cudnn.version()) print("""Number of GPUs available:""", torch.cuda.device_count()) except ImportError: print("""Torch version:""", None) try: import transformers print("""transformers version:""", transformers.__version__) except ImportError: print("""transformers version:""", None)
676
"""Levenshtein (edit) distance via top-down recursion with memoization."""
import functools


def a_(word1: str, word2: str) -> int:
    """Return the minimum number of insertions/deletions/substitutions turning ``word1`` into ``word2``.

    >>> a_("kitten", "sitting")
    3
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        # 0 when current letters match, 1 (substitution cost) otherwise
        diff = int(word1[index1] != word2[index2])
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
"""Test fixture: a fast tokenizer paired with the custom slow tokenizer."""
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    """A ``BertTokenizerFast`` subclass whose matching slow class is ``CustomTokenizer``."""

    # Used by the fast<->slow conversion machinery to locate the slow counterpart.
    slow_tokenizer_class = CustomTokenizer
    pass
676
"""Check whether a number is automorphic, i.e. its square ends with the number itself."""


def a_(number: int) -> bool:
    """Return True if ``number`` is automorphic (e.g. 5 -> 25, 25 -> 625).

    Raises TypeError for non-integer input; negative numbers are never automorphic.

    >>> a_(25)
    True
    >>> a_(7)
    False
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare trailing digits of the number and its square one position at a time.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
"""Lazy import structure for the XGLM model family (slow/fast tokenizers, PT/Flax/TF models)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# submodule name -> public names, consumed by _LazyModule below; optional entries are added
# only when their backend dependency is importable.
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules import on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
676
"""Recursive backtracking Sudoku solver; 0 marks an empty cell."""
from __future__ import annotations

# A 9x9 board represented as a list of rows of ints.
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if ``n`` can be placed at (row, column) without a row/column/box conflict."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    # Check the 3x3 box containing (row, column).
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or None when the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking; return it when solved, else None."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # undo the tentative digit and backtrack

    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
676
1
"""Tests for ``Dataset.from_list``."""
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    """Exercise Dataset.from_list against records built by hand."""

    def _create_example_records(self):
        # Four records sharing the same two columns.
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        # Column-oriented twin of _create_example_records.
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
676
"""Informer model configuration."""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """Configuration for an Informer time-series model.

    Stores architecture hyper-parameters (encoder/decoder sizes, ProbSparse attention
    settings) together with the time-series feature layout.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra model-input features beyond the lagged target values."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
676
1
"""Temporal (across-frames) transformer used by video diffusion models."""
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output of TransformerTemporalModel.

    Attributes:
        sample: the transformed hidden states, same shape as the input.
    """

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """Applies self-attention across the frame axis of a (batch*frames, C, H, W) tensor."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """Run temporal attention; ``hidden_states`` is (batch*num_frames, channel, height, width)."""
        # 1. Input: fold frames into a sequence dimension so attention runs across time.
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        # Each spatial location becomes one sequence of length num_frames.
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
676
"""Project Euler problem 50: longest sum of consecutive primes below a ceiling that is prime."""


def prime_sieve(limit: int) -> list[int]:
    """Return all primes strictly below ``limit`` (requires limit > 2) via an odd-only sieve."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    # Mark every multiple of each odd i starting at 2*i.
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 100_0000) -> int:
    """Return the prime below ``ceiling`` writable as the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        # Only consider runs at least as long as the best found so far.
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
676
1
"""Conversation state holder and the conversational generation pipeline."""
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    """Holds one conversation: processed user inputs, bot responses, and the pending input."""

    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        # Two conversations are equal when they share a UUID or the exact same content.
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        """Queue ``text`` as the next user input; warn when one is already pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the pending user input into the processed history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        """Record one bot response."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield (is_user, text) pairs in chronological order, pending input last."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ',
)
class ConversationalPipeline(Pipeline):
    """Multi-turn conversational pipeline built on top of a generation model."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Generation needs a pad token; fall back to EOS when the tokenizer defines none.
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        """Split call kwargs into preprocess/forward/postprocess parameter dicts."""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        """Run generation on one or several ``Conversation`` objects."""
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        """Tokenize the conversation into model inputs for the configured framework."""
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        """Generate a reply, trimming the history so at least ``minimum_tokens`` remain for it."""
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        # Encoder-decoder outputs start with the decoder start token; causal LMs echo the prompt.
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        """Decode the generated ids and fold the answer back into the conversation."""
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        """Flatten the conversation into ids, separating turns with the EOS token when available."""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
"""Unconditional image generation pipeline using DDIM sampling."""
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    """Generate images from pure noise with a UNet denoiser and a DDIM scheduler."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator=None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Sample ``batch_size`` images; returns ImagePipelineOutput (or a tuple if return_dict=False)."""
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
676
1
'''simple docstring'''
import os


def solution() -> int:
    """Return the maximum top-to-bottom path sum through the triangle in
    ``triangle.txt`` (Project Euler 18/67 style), located next to this script.

    The obfuscated original called ``os.path.realpath(__snake_case)`` from a
    zero-argument function and appended to an undefined name ``a``; the
    reconstruction reads the file relative to ``__file__`` and uses one
    consistent ``triangle`` list.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, 'triangle.txt')

    with open(triangle_path) as f:
        lines = f.readlines()

    # Parse each line into a row of ints.
    triangle = []
    for line in lines:
        row = [int(number) for number in line.strip().split(' ')]
        triangle.append(row)

    # Bottom-up DP in place: each cell accumulates the best path reaching it
    # from the row above (missing diagonal parents contribute 0).
    for i in range(1, len(triangle)):
        for j in range(len(triangle[i])):
            above_right = triangle[i - 1][j] if j != len(triangle[i - 1]) else 0
            above_left = triangle[i - 1][j - 1] if j > 0 else 0
            triangle[i][j] += max(above_right, above_left)
    return max(triangle[-1])


# Backward-compatible alias for the original (obfuscated) helper name.
a_ = solution

if __name__ == "__main__":
    print(solution())
676
'''simple docstring'''
from maths.prime_check import is_prime


def a_(number: int) -> int:
    """Return ``number + 2`` if (number, number + 2) is a twin-prime pair,
    otherwise -1.

    Raises:
        TypeError: if ``number`` is not an int.
    """
    # The obfuscated original took `__snake_case` but referenced `number`
    # in the message and in `is_prime(number + 2)` — restore one name.
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
'''simple docstring'''


def sum_of_digits(n: int) -> int:
    """Iteratively sum the decimal digits of ``n`` (sign is ignored)."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursively sum the decimal digits of ``n`` (sign is ignored)."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Sum the decimal digits of ``n`` via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time the three implementations on progressively larger inputs.

    The obfuscated original named all three sum functions ``a_`` (each
    shadowing the previous) while this loop referenced the real names —
    restored here so the module is actually runnable.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        # timeit resolves the call through __main__, so this only works
        # when the module is executed as a script (see the guard below).
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup='import __main__')
        print(f'{call:56} = {func(value)} -- {timing:.4f} seconds')

    # 2**18, 2**50 and 2**100: small, medium, big-int inputs.
    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
676
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
# NOTE(review): identifiers in this file are machine-obfuscated — both classes
# are named `__UpperCamelCase`, parameters are duplicated `lowerCAmelCase`, and
# bodies reference names (`sigma_max`, `sampling_eps`, `sample`, ...) that no
# longer exist in the signatures. Code is kept token-identical below; only
# comments were added. Restoring the real names requires the upstream file
# (this looks like diffusers' ScoreSdeVeScheduler — TODO confirm).
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class __UpperCamelCase(lowerCamelCase__):
    # Output container: presumably the previous sample and its pre-noise mean
    # (both fields were obfuscated to the same name `lowercase`).
    lowercase: torch.FloatTensor
    lowercase: torch.FloatTensor


class __UpperCamelCase(lowerCamelCase__, lowerCamelCase__):
    # Scheduler "order" marker (value 1), kept for API compatibility.
    lowercase: Tuple = 1

    @register_to_config
    def __init__(self, lowerCAmelCase=2_000, lowerCAmelCase=0.1_5, lowerCAmelCase=0.0_1, lowerCAmelCase=1_3_4_8.0, lowerCAmelCase=1e-5, lowerCAmelCase=1, ):
        """simple docstring"""
        # Body references `sigma_max`, so the defaults above presumably map to
        # (num_train_timesteps, snr, sigma_min, sigma_max, sampling_eps,
        # correct_steps) — TODO confirm against upstream.
        lowerCamelCase_ = sigma_max

        # setable values
        lowerCamelCase_ = None
        self.set_sigmas(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase)

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase=None):
        """simple docstring"""
        # Identity scaling: this scheduler does not rescale model inputs.
        return sample

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=None):
        """simple docstring"""
        # set_timesteps: continuous timesteps from 1 down to sampling_eps.
        lowerCamelCase_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        lowerCamelCase_ = torch.linspace(1, lowerCAmelCase, lowerCAmelCase, device=lowerCAmelCase)

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None):
        """simple docstring"""
        # set_sigmas: geometric sigma schedule between sigma_min and sigma_max.
        lowerCamelCase_ = sigma_min if sigma_min is not None else self.config.sigma_min
        lowerCamelCase_ = sigma_max if sigma_max is not None else self.config.sigma_max
        lowerCamelCase_ = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(lowerCAmelCase, lowerCAmelCase)

        lowerCamelCase_ = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        lowerCamelCase_ = torch.exp(torch.linspace(math.log(lowerCAmelCase), math.log(lowerCAmelCase), lowerCAmelCase))
        lowerCamelCase_ = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase):
        """simple docstring"""
        # get_adjacent_sigma: sigma at timestep-1, or zero at the boundary.
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=True, ):
        """simple docstring"""
        # step_pred: one reverse-SDE predictor step.
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler'''
            )

        lowerCamelCase_ = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        lowerCamelCase_ = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        lowerCamelCase_ = timesteps.to(self.discrete_sigmas.device)

        lowerCamelCase_ = self.discrete_sigmas[timesteps].to(sample.device)
        lowerCamelCase_ = self.get_adjacent_sigma(lowerCAmelCase, lowerCAmelCase).to(sample.device)
        lowerCamelCase_ = torch.zeros_like(lowerCAmelCase)
        lowerCamelCase_ = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        lowerCamelCase_ = diffusion.flatten()
        # Broadcast the per-batch diffusion coefficient to the sample's rank.
        while len(diffusion.shape) < len(sample.shape):
            lowerCamelCase_ = diffusion.unsqueeze(-1)
        lowerCamelCase_ = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        lowerCamelCase_ = randn_tensor(
            sample.shape, layout=sample.layout, generator=lowerCAmelCase, device=sample.device, dtype=sample.dtype
        )
        lowerCamelCase_ = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        lowerCamelCase_ = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=lowerCAmelCase, prev_sample_mean=lowerCAmelCase)

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=True, ):
        """simple docstring"""
        # step_correct: one Langevin corrector step at a fixed noise level.
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler'''
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        lowerCamelCase_ = randn_tensor(sample.shape, layout=sample.layout, generator=lowerCAmelCase).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        lowerCamelCase_ = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        lowerCamelCase_ = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        lowerCamelCase_ = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        lowerCamelCase_ = step_size * torch.ones(sample.shape[0]).to(sample.device)  # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        lowerCamelCase_ = step_size.flatten()
        # Broadcast the per-batch step size to the sample's rank.
        while len(step_size.shape) < len(sample.shape):
            lowerCamelCase_ = step_size.unsqueeze(-1)
        lowerCamelCase_ = sample + step_size * model_output
        lowerCamelCase_ = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=lowerCAmelCase)

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """simple docstring"""
        # add_noise: perturb clean samples with sigma-scaled Gaussian noise.
        lowerCamelCase_ = timesteps.to(original_samples.device)
        lowerCamelCase_ = self.discrete_sigmas.to(original_samples.device)[timesteps]
        lowerCamelCase_ = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(lowerCAmelCase) * sigmas[:, None, None, None]
        )
        lowerCamelCase_ = noise + original_samples
        return noisy_samples

    def __len__(self):
        """simple docstring"""
        return self.config.num_train_timesteps
676
1
'''simple docstring''' import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger a_ : List[Any] = """<<<<<<< This should probably be modified because it mentions: """ a_ : List[str] = """======= >>>>>>> """ a_ : int = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] a_ : int = [ # (pattern, replacement) # Order is important here for some replacements (R"""tfds\.core""", R"""datasets"""), (R"""tf\.io\.gfile\.GFile""", R"""open"""), (R"""tf\.([\w\d]+)""", R"""datasets.Value('\1')"""), (R"""tfds\.features\.Text\(\)""", R"""datasets.Value('string')"""), (R"""tfds\.features\.Text\(""", R"""datasets.Value('string'),"""), (R"""features\s*=\s*tfds.features.FeaturesDict\(""", R"""features=datasets.Features("""), (R"""tfds\.features\.FeaturesDict\(""", R"""dict("""), (R"""The TensorFlow Datasets Authors""", R"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (R"""tfds\.""", R"""datasets."""), (R"""dl_manager\.manual_dir""", R"""self.config.data_dir"""), (R"""self\.builder_config""", R"""self.config"""), ] def a_ ( __snake_case : Namespace ) -> int: """simple docstring""" return ConvertCommand(args.tfds_path , args.datasets_directory ) class __UpperCamelCase ( lowerCamelCase__ ): @staticmethod def lowercase__ ( lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =parser.add_parser( '''convert''', help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''', ) train_parser.add_argument( '''--tfds_path''', type=lowerCAmelCase, required=lowerCAmelCase, help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''', ) train_parser.add_argument( '''--datasets_directory''', type=lowerCAmelCase, required=lowerCAmelCase, help='''Path to the HuggingFace Datasets folder.''' ) 
train_parser.set_defaults(func=lowerCAmelCase ) def __init__( self, lowerCAmelCase, lowerCAmelCase, *lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =get_logger('''datasets-cli/converting''' ) lowerCamelCase_ =tfds_path lowerCamelCase_ =datasets_directory def lowercase__ ( self ): """simple docstring""" if os.path.isdir(self._tfds_path ): lowerCamelCase_ =os.path.abspath(self._tfds_path ) elif os.path.isfile(self._tfds_path ): lowerCamelCase_ =os.path.dirname(self._tfds_path ) else: raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' ) lowerCamelCase_ =os.path.abspath(self._datasets_directory ) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' ) lowerCamelCase_ =[] lowerCamelCase_ =[] lowerCamelCase_ ={} if os.path.isdir(self._tfds_path ): lowerCamelCase_ =os.listdir(lowerCAmelCase ) else: lowerCamelCase_ =[os.path.basename(self._tfds_path )] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''' ) lowerCamelCase_ =os.path.join(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =os.path.join(lowerCAmelCase, lowerCAmelCase ) if not os.path.isfile(lowerCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('''Skipping file''' ) continue with open(lowerCAmelCase, encoding='''utf-8''' ) as f: lowerCamelCase_ =f.readlines() lowerCamelCase_ =[] lowerCamelCase_ =False lowerCamelCase_ =False lowerCamelCase_ =[] for line in lines: lowerCamelCase_ =line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: lowerCamelCase_ ='''import datasets\n''' elif "import tensorflow" in out_line: # order is important here lowerCamelCase_ ='''''' continue elif "from absl import logging" in out_line: lowerCamelCase_ ='''from datasets import logging\n''' elif "getLogger" in 
out_line: lowerCamelCase_ =out_line.replace('''getLogger''', '''get_logger''' ) elif any(expression in out_line for expression in TO_HIGHLIGHT ): lowerCamelCase_ =True lowerCamelCase_ =list(filter(lambda lowerCAmelCase : e in out_line, lowerCAmelCase ) ) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase ) + '''\n''' ) out_lines.append(lowerCAmelCase ) out_lines.append(lowerCAmelCase ) continue else: for pattern, replacement in TO_CONVERT: lowerCamelCase_ =re.sub(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: lowerCamelCase_ =re.match(R'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''', lowerCAmelCase ) tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) ) lowerCamelCase_ ='''from . import ''' + match.group(1 ) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''' ) if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: lowerCamelCase_ =True out_lines.append(lowerCAmelCase ) if is_builder or "wmt" in f_name: # We create a new directory for each dataset lowerCamelCase_ =f_name.replace('''.py''', '''''' ) lowerCamelCase_ =os.path.join(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =os.path.join(lowerCAmelCase, lowerCAmelCase ) os.makedirs(lowerCAmelCase, exist_ok=lowerCAmelCase ) self._logger.info(f'''Adding directory {output_dir}''' ) imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} ) else: # Utilities will be moved at the end utils_files.append(lowerCAmelCase ) if needs_manual_update: with_manual_update.append(lowerCAmelCase ) with open(lowerCAmelCase, '''w''', encoding='''utf-8''' ) as f: f.writelines(lowerCAmelCase ) self._logger.info(f'''Converted in {output_file}''' ) for utils_file in utils_files: try: lowerCamelCase_ 
=os.path.basename(lowerCAmelCase ) lowerCamelCase_ =imports_to_builder_map[f_name.replace('''.py''', '''''' )] self._logger.info(f'''Moving {dest_folder} to {utils_file}''' ) shutil.copy(lowerCAmelCase, lowerCAmelCase ) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''' ) if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
676
'''simple docstring'''


def a_(number: int, iterations: int) -> str:
    """Play FizzBuzz from ``number`` up to and including ``iterations``.

    Returns one space-terminated token per value: "Fizz" for multiples of 3,
    "Buzz" for multiples of 5, "FizzBuzz" for both, otherwise the number.

    The obfuscated original named both parameters ``__snake_case`` (a
    SyntaxError) while the body referenced ``number`` and ``iterations``.

    Raises:
        ValueError: on non-int arguments, number < 1, or iterations < 1.
    """
    if not isinstance(iterations, int):
        raise ValueError('''iterations must be defined as integers''')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            '''starting number must be
 and integer and be more than 0''')
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
'''simple docstring'''
# NOTE(review): identifiers in this test file are machine-obfuscated — every
# assignment targets `lowerCamelCase_`, all class attributes are named
# `lowercase`, and `lowerCAmelCase` is passed wherever a bool/device/seed was.
# Code is kept token-identical; only comments were added.
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class __UpperCamelCase(lowerCamelCase__, lowerCamelCase__, unittest.TestCase):
    # Fast (tiny-model) tests for CycleDiffusionPipeline.
    lowercase: List[str] = CycleDiffusionPipeline
    lowercase: Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        '''negative_prompt''',
        '''height''',
        '''width''',
        '''negative_prompt_embeds''',
    }
    lowercase: str = PipelineTesterMixin.required_optional_params - {'''latents'''}
    lowercase: Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''})
    lowercase: Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS
    lowercase: List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def lowercase__(self):
        """simple docstring"""
        # get_dummy_components: build tiny pipeline components, each under a
        # fixed seed so outputs are reproducible.
        torch.manual_seed(0)
        lowerCamelCase_ = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),
            up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''),
            cross_attention_dim=32,
        )
        lowerCamelCase_ = DDIMScheduler(
            beta_start=0.0_0_0_8_5,
            beta_end=0.0_1_2,
            beta_schedule='''scaled_linear''',
            num_train_timesteps=1_000,
            clip_sample=lowerCAmelCase,
            set_alpha_to_one=lowerCAmelCase,
        )
        torch.manual_seed(0)
        lowerCamelCase_ = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            latent_channels=4,
        )
        torch.manual_seed(0)
        lowerCamelCase_ = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        lowerCamelCase_ = CLIPTextModel(lowerCAmelCase)
        lowerCamelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        lowerCamelCase_ = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase=0):
        """simple docstring"""
        # get_dummy_inputs: deterministic image + generator; mps needs a
        # CPU-seeded generator.
        lowerCamelCase_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase)).to(lowerCAmelCase)
        lowerCamelCase_ = image / 2 + 0.5
        if str(lowerCAmelCase).startswith('''mps'''):
            lowerCamelCase_ = torch.manual_seed(lowerCAmelCase)
        else:
            lowerCamelCase_ = torch.Generator(device=lowerCAmelCase).manual_seed(lowerCAmelCase)
        lowerCamelCase_ = {
            '''prompt''': '''An astronaut riding an elephant''',
            '''source_prompt''': '''An astronaut riding a horse''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''eta''': 0.1,
            '''strength''': 0.8,
            '''guidance_scale''': 3,
            '''source_guidance_scale''': 1,
            '''output_type''': '''numpy''',
        }
        return inputs

    def lowercase__(self):
        """simple docstring"""
        # End-to-end fast test: output shape and golden pixel slice.
        lowerCamelCase_ = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase_ = self.get_dummy_components()
        lowerCamelCase_ = CycleDiffusionPipeline(**lowerCAmelCase)
        lowerCamelCase_ = pipe.to(lowerCAmelCase)
        pipe.set_progress_bar_config(disable=lowerCAmelCase)

        lowerCamelCase_ = self.get_dummy_inputs(lowerCAmelCase)
        lowerCamelCase_ = pipe(**lowerCAmelCase)
        lowerCamelCase_ = output.images

        lowerCamelCase_ = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        lowerCamelCase_ = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != '''cuda''', '''This test requires a GPU''')
    def lowercase__(self):
        """simple docstring"""
        # Same end-to-end check with all components cast to fp16.
        lowerCamelCase_ = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(lowerCAmelCase, '''half'''):
                lowerCamelCase_ = module.half()
        lowerCamelCase_ = CycleDiffusionPipeline(**lowerCAmelCase)
        lowerCamelCase_ = pipe.to(lowerCAmelCase)
        pipe.set_progress_bar_config(disable=lowerCAmelCase)

        lowerCamelCase_ = self.get_dummy_inputs(lowerCAmelCase)
        lowerCamelCase_ = pipe(**lowerCAmelCase)
        lowerCamelCase_ = output.images

        lowerCamelCase_ = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        lowerCamelCase_ = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def lowercase__(self):
        """simple docstring"""
        return super().test_save_load_local()

    @unittest.skip('''non-deterministic pipeline''')
    def lowercase__(self):
        """simple docstring"""
        return super().test_inference_batch_single_identical()

    @skip_mps
    def lowercase__(self):
        """simple docstring"""
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def lowercase__(self):
        """simple docstring"""
        return super().test_save_load_optional_components()

    @skip_mps
    def lowercase__(self):
        """simple docstring"""
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class __UpperCamelCase(unittest.TestCase):
    # Slow GPU integration tests against the real SD v1-4 checkpoint.
    def lowercase__(self):
        """simple docstring"""
        # tearDown: release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase__(self):
        """simple docstring"""
        # fp16 integration test: black car -> blue car against a golden image.
        lowerCamelCase_ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png'''
        )
        lowerCamelCase_ = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy'''
        )
        lowerCamelCase_ = init_image.resize((512, 512))

        lowerCamelCase_ = '''CompVis/stable-diffusion-v1-4'''
        lowerCamelCase_ = DDIMScheduler.from_pretrained(lowerCAmelCase, subfolder='''scheduler''')
        lowerCamelCase_ = CycleDiffusionPipeline.from_pretrained(
            lowerCAmelCase, scheduler=lowerCAmelCase, safety_checker=lowerCAmelCase, torch_dtype=torch.floataa, revision='''fp16'''
        )

        pipe.to(lowerCAmelCase)
        pipe.set_progress_bar_config(disable=lowerCAmelCase)
        pipe.enable_attention_slicing()

        lowerCamelCase_ = '''A black colored car'''
        lowerCamelCase_ = '''A blue colored car'''

        lowerCamelCase_ = torch.manual_seed(0)
        lowerCamelCase_ = pipe(
            prompt=lowerCAmelCase,
            source_prompt=lowerCAmelCase,
            image=lowerCAmelCase,
            num_inference_steps=100,
            eta=0.1,
            strength=0.8_5,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=lowerCAmelCase,
            output_type='''np''',
        )
        lowerCamelCase_ = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def lowercase__(self):
        """simple docstring"""
        # fp32 integration test, tighter tolerance than the fp16 variant.
        lowerCamelCase_ = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/cycle-diffusion/black_colored_car.png'''
        )
        lowerCamelCase_ = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy'''
        )
        lowerCamelCase_ = init_image.resize((512, 512))

        lowerCamelCase_ = '''CompVis/stable-diffusion-v1-4'''
        lowerCamelCase_ = DDIMScheduler.from_pretrained(lowerCAmelCase, subfolder='''scheduler''')
        lowerCamelCase_ = CycleDiffusionPipeline.from_pretrained(lowerCAmelCase, scheduler=lowerCAmelCase, safety_checker=lowerCAmelCase)

        pipe.to(lowerCAmelCase)
        pipe.set_progress_bar_config(disable=lowerCAmelCase)
        pipe.enable_attention_slicing()

        lowerCamelCase_ = '''A black colored car'''
        lowerCamelCase_ = '''A blue colored car'''

        lowerCamelCase_ = torch.manual_seed(0)
        lowerCamelCase_ = pipe(
            prompt=lowerCAmelCase,
            source_prompt=lowerCAmelCase,
            image=lowerCAmelCase,
            num_inference_steps=100,
            eta=0.1,
            strength=0.8_5,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=lowerCAmelCase,
            output_type='''np''',
        )
        lowerCamelCase_ = output.images

        assert np.abs(image - expected_image).max() < 2e-2
676
'''simple docstring''' from typing import List import numpy as np def a_ ( __snake_case : dict ) -> int: """simple docstring""" lowerCamelCase_ ={key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) lowerCamelCase_ =max(lists_lengths.values() , default=0 ) return max(1 , __snake_case ) def a_ ( __snake_case : int , __snake_case : int ) -> List[range]: """simple docstring""" lowerCamelCase_ =[] for group_idx in range(__snake_case ): lowerCamelCase_ =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowerCamelCase_ =shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowerCamelCase_ =range(__snake_case , start + num_shards_to_add ) shards_indices_per_group.append(__snake_case ) return shards_indices_per_group def a_ ( __snake_case : dict , __snake_case : int ) -> List[dict]: """simple docstring""" lowerCamelCase_ =_number_of_shards_in_gen_kwargs(__snake_case ) if num_shards == 1: return [dict(__snake_case )] else: lowerCamelCase_ =_distribute_shards(num_shards=__snake_case , max_num_jobs=__snake_case ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__snake_case , __snake_case ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__snake_case ) ) ] def a_ ( __snake_case : List[dict] ) -> dict: """simple docstring""" 
return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __snake_case ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def a_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> dict: """simple docstring""" lowerCamelCase_ ={len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )} lowerCamelCase_ ={} for size in list_sizes: lowerCamelCase_ =list(range(__snake_case ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowerCamelCase_ =dict(__snake_case ) for key, value in shuffled_kwargs.items(): if isinstance(__snake_case , __snake_case ): lowerCamelCase_ =[value[i] for i in indices_per_size[len(__snake_case )]] return shuffled_kwargs
676
1
'''simple docstring'''
# NOTE(review): identifiers in this test file are machine-obfuscated — all
# assignments target `lowerCamelCase_`, class attributes are all `lowercase`,
# and `lowerCAmelCase` is passed where booleans/devices/dims were. Code is
# kept token-identical; only comments were added.
import gc
import unittest

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)

enable_full_determinism()


class __UpperCamelCase(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, unittest.TestCase):
    # Fast (tiny-model) tests for StableUnCLIPPipeline.
    lowercase: List[str] = StableUnCLIPPipeline
    lowercase: Union[str, Any] = TEXT_TO_IMAGE_PARAMS
    lowercase: Any = TEXT_TO_IMAGE_BATCH_PARAMS
    lowercase: str = TEXT_TO_IMAGE_IMAGE_PARAMS
    lowercase: Any = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    lowercase: Union[str, Any] = False

    def lowercase__(self):
        """simple docstring"""
        # get_dummy_components: tiny prior (unCLIP) stack + tiny denoising
        # stack, each built under a fixed seed for determinism.
        lowerCamelCase_ = 32
        lowerCamelCase_ = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        lowerCamelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')

        torch.manual_seed(0)
        lowerCamelCase_ = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=lowerCAmelCase,
                projection_dim=lowerCAmelCase,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        lowerCamelCase_ = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=lowerCAmelCase,
            num_layers=1,
        )

        torch.manual_seed(0)
        lowerCamelCase_ = DDPMScheduler(
            variance_type='''fixed_small_log''',
            prediction_type='''sample''',
            num_train_timesteps=1_000,
            clip_sample=lowerCAmelCase,
            clip_sample_range=5.0,
            beta_schedule='''squaredcos_cap_v2''',
        )

        # regular denoising components

        torch.manual_seed(0)
        lowerCamelCase_ = StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase)
        lowerCamelCase_ = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''')

        torch.manual_seed(0)
        lowerCamelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')

        torch.manual_seed(0)
        lowerCamelCase_ = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=lowerCAmelCase,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1_000,
            )
        )

        torch.manual_seed(0)
        lowerCamelCase_ = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D'''),
            up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D'''),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type='''projection''',
            # The class embedding receives the image embedding + noise level.
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=lowerCAmelCase,
            layers_per_block=1,
            upcast_attention=lowerCAmelCase,
            use_linear_projection=lowerCAmelCase,
        )

        torch.manual_seed(0)
        lowerCamelCase_ = DDIMScheduler(
            beta_schedule='''scaled_linear''',
            beta_start=0.0_0_0_8_5,
            beta_end=0.0_1_2,
            prediction_type='''v_prediction''',
            set_alpha_to_one=lowerCAmelCase,
            steps_offset=1,
        )

        torch.manual_seed(0)
        lowerCamelCase_ = AutoencoderKL()

        lowerCamelCase_ = {
            # prior components
            '''prior_tokenizer''': prior_tokenizer,
            '''prior_text_encoder''': prior_text_encoder,
            '''prior''': prior,
            '''prior_scheduler''': prior_scheduler,
            # image noising components
            '''image_normalizer''': image_normalizer,
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder,
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
        }

        return components

    def lowercase__(self, lowerCAmelCase, lowerCAmelCase=0):
        """simple docstring"""
        # get_dummy_inputs: seeded generator (CPU-seeded on mps) + tiny prompt.
        if str(lowerCAmelCase).startswith('''mps'''):
            lowerCamelCase_ = torch.manual_seed(lowerCAmelCase)
        else:
            lowerCamelCase_ = torch.Generator(device=lowerCAmelCase).manual_seed(lowerCAmelCase)
        lowerCamelCase_ = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''prior_num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def lowercase__(self):
        """simple docstring"""
        # Attention slicing: only compare exact values on CPU.
        lowerCamelCase_ = torch_device == '''cpu'''

        self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase)

    def lowercase__(self):
        """simple docstring"""
        # Batch-vs-single equality: exact comparison only on cpu/mps.
        lowerCamelCase_ = torch_device in ['''cpu''', '''mps''']

        self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase)


@slow
@require_torch_gpu
class __UpperCamelCase(unittest.TestCase):
    # Slow GPU integration tests against the real stable-unclip checkpoint.
    def lowercase__(self):
        """simple docstring"""
        # tearDown: release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase__(self):
        """simple docstring"""
        # Full text->image run compared against a golden image.
        lowerCamelCase_ = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy'''
        )

        lowerCamelCase_ = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''', torch_dtype=torch.floataa)
        pipe.to(lowerCAmelCase)
        pipe.set_progress_bar_config(disable=lowerCAmelCase)

        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        lowerCamelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
        lowerCamelCase_ = pipe('''anime turle''', generator=lowerCAmelCase, output_type='''np''')

        lowerCamelCase_ = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(lowerCAmelCase, lowerCAmelCase)

    def lowercase__(self):
        """simple docstring"""
        # Peak-memory regression test with offloading enabled.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        lowerCamelCase_ = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''', torch_dtype=torch.floataa)
        lowerCamelCase_ = pipe.to(lowerCAmelCase)
        pipe.set_progress_bar_config(disable=lowerCAmelCase)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        lowerCamelCase_ = pipe(
            '''anime turtle''',
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type='''np''',
        )

        lowerCamelCase_ = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
676
'''simple docstring''' import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) a_ : int = logging.getLogger(__name__) def a_ ( ) -> Union[str, Any]: """simple docstring""" lowerCamelCase_ =argparse.ArgumentParser( description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' ) parser.add_argument('''--file_path''' , type=__snake_case , default='''data/dump.txt''' , help='''The path to the data.''' ) parser.add_argument('''--tokenizer_type''' , type=__snake_case , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] ) parser.add_argument('''--tokenizer_name''' , type=__snake_case , default='''bert-base-uncased''' , help='''The tokenizer to use.''' ) parser.add_argument('''--dump_file''' , type=__snake_case , default='''data/dump''' , help='''The dump file prefix.''' ) lowerCamelCase_ =parser.parse_args() logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' ) if args.tokenizer_type == "bert": lowerCamelCase_ =BertTokenizer.from_pretrained(args.tokenizer_name ) lowerCamelCase_ =tokenizer.special_tokens_map['''cls_token'''] # `[CLS]` lowerCamelCase_ =tokenizer.special_tokens_map['''sep_token'''] # `[SEP]` elif args.tokenizer_type == "roberta": lowerCamelCase_ =RobertaTokenizer.from_pretrained(args.tokenizer_name ) lowerCamelCase_ =tokenizer.special_tokens_map['''cls_token'''] # `<s>` lowerCamelCase_ =tokenizer.special_tokens_map['''sep_token'''] # `</s>` elif args.tokenizer_type == "gpt2": lowerCamelCase_ =GPTaTokenizer.from_pretrained(args.tokenizer_name ) lowerCamelCase_ =tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>` lowerCamelCase_ =tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>` logger.info(F'''Loading text from 
{args.file_path}''' ) with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp: lowerCamelCase_ =fp.readlines() logger.info('''Start encoding''' ) logger.info(F'''{len(__snake_case )} examples to process.''' ) lowerCamelCase_ =[] lowerCamelCase_ =0 lowerCamelCase_ =1_0000 lowerCamelCase_ =time.time() for text in data: lowerCamelCase_ =F'''{bos} {text.strip()} {sep}''' lowerCamelCase_ =tokenizer.encode(__snake_case , add_special_tokens=__snake_case ) rslt.append(__snake_case ) iter += 1 if iter % interval == 0: lowerCamelCase_ =time.time() logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' ) lowerCamelCase_ =time.time() logger.info('''Finished binarization''' ) logger.info(F'''{len(__snake_case )} examples processed.''' ) lowerCamelCase_ =F'''{args.dump_file}.{args.tokenizer_name}.pickle''' lowerCamelCase_ =tokenizer.vocab_size if vocab_size < (1 << 16): lowerCamelCase_ =[np.uintaa(__snake_case ) for d in rslt] else: lowerCamelCase_ =[np.intaa(__snake_case ) for d in rslt] random.shuffle(rslt_ ) logger.info(F'''Dump to {dp_file}''' ) with open(__snake_case , '''wb''' ) as handle: pickle.dump(rslt_ , __snake_case , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
676
1
'''simple docstring''' # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys a_ : List[Any] = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""") a_ : Union[str, Any] = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode("""utf-8""").split() a_ : Union[str, Any] = """|""".join(sys.argv[1:]) a_ : Tuple = re.compile(RF"""^({joined_dirs}).*?\.py$""") a_ : Tuple = [x for x in modified_files if regex.match(x)] print(""" """.join(relevant_modified_files), end="""""")
676
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : str = logging.get_logger(__name__) a_ : int = { """RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""", } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[str] ='mvp' lowercase : List[str] =['past_key_values'] lowercase : Dict ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self, lowerCAmelCase=50_267, lowerCAmelCase=1_024, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase="gelu", lowerCAmelCase=1_024, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase=True, lowerCAmelCase=2, lowerCAmelCase=2, lowerCAmelCase=False, lowerCAmelCase=100, lowerCAmelCase=800, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =vocab_size lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =d_model lowerCamelCase_ =encoder_ffn_dim lowerCamelCase_ =encoder_layers lowerCamelCase_ =encoder_attention_heads lowerCamelCase_ =decoder_ffn_dim lowerCamelCase_ =decoder_layers lowerCamelCase_ =decoder_attention_heads lowerCamelCase_ =dropout lowerCamelCase_ =attention_dropout lowerCamelCase_ =activation_dropout lowerCamelCase_ =activation_function lowerCamelCase_ =init_std lowerCamelCase_ =encoder_layerdrop lowerCamelCase_ =decoder_layerdrop lowerCamelCase_ =classifier_dropout lowerCamelCase_ =use_cache lowerCamelCase_ =encoder_layers lowerCamelCase_ =scale_embedding # scale factor will be sqrt(d_model) if True lowerCamelCase_ =use_prompt lowerCamelCase_ =prompt_length lowerCamelCase_ =prompt_mid_dim super().__init__( pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, 
eos_token_id=lowerCAmelCase, is_encoder_decoder=lowerCAmelCase, decoder_start_token_id=lowerCAmelCase, forced_eos_token_id=lowerCAmelCase, **lowerCAmelCase, ) if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''', lowerCAmelCase ): lowerCamelCase_ =self.bos_token_id warnings.warn( f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. ''' '''The config can simply be saved and uploaded again to be fixed.''' )
676
1
'''simple docstring''' import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a_ : List[str] = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp a_ : Optional[Any] = 5 a_ : Union[str, Any] = 10 @require_sentencepiece @require_tokenizers class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : Union[str, Any] =SpeechaTextTokenizer lowercase : List[str] =False lowercase : List[Any] =True def lowercase__ ( self ): """simple docstring""" super().setUp() lowerCamelCase_ =sp.SentencePieceProcessor() spm_model.Load(lowerCAmelCase ) lowerCamelCase_ =['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(lowerCAmelCase ) )] lowerCamelCase_ =dict(zip(lowerCAmelCase, range(len(lowerCAmelCase ) ) ) ) lowerCamelCase_ =Path(self.tmpdirname ) save_json(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''vocab_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowerCAmelCase, save_dir / VOCAB_FILES_NAMES['''spm_file'''] ) lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''<pad>''' lowerCamelCase_ =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ), lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ), lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ 
=list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0], '''<s>''' ) self.assertEqual(vocab_keys[1], '''<pad>''' ) self.assertEqual(vocab_keys[-1], '''j''' ) self.assertEqual(len(lowerCAmelCase ), 1_001 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size, 1_001 ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) lowerCamelCase_ =tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowerCAmelCase, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase ), [289, 50, 14, 174, 386], ) lowerCamelCase_ =tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''], ) lowerCamelCase_ =tokenizer.convert_tokens_to_ids(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) lowerCamelCase_ =tokenizer.convert_ids_to_tokens(lowerCAmelCase ) self.assertListEqual( lowerCAmelCase, [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''], ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={'''input_ids''': [[3_791, 797, 31, 11, 64, 
797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase, model_name='''facebook/s2t-small-mustc-en-de-st''', revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''', ) 
@require_sentencepiece class __UpperCamelCase ( unittest.TestCase ): lowercase : Optional[Any] ='valhalla/s2t_mustc_multilinguial_medium' lowercase : str ='C\'est trop cool' lowercase : Dict ='Esto es genial' @classmethod def lowercase__ ( cls ): """simple docstring""" lowerCamelCase_ =SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''], 4 ) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''], 6 ) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''], 9 ) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''], 11 ) def lowercase__ ( self ): """simple docstring""" self.assertEqual(self.tokenizer.vocab_size, 10_000 ) def lowercase__ ( self ): """simple docstring""" self.assertIn(lowerCAmelCase, self.tokenizer.all_special_ids ) lowerCamelCase_ =[ES_CODE, 4, 1_601, 47, 7_647, 2] lowerCamelCase_ =self.tokenizer.decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCAmelCase ) self.assertEqual(lowerCAmelCase, lowerCAmelCase ) self.assertNotIn(self.tokenizer.eos_token, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''fr''' lowerCamelCase_ =self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0], lowerCAmelCase ) self.assertEqual(encoded[-1], self.tokenizer.eos_token_id ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''fr''' self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE] ) lowerCamelCase_ ='''es''' self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE] )
676
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a_ : int = logging.get_logger(__name__) a_ : str = {"""vocab_file""": """spiece.model"""} a_ : Optional[int] = { """vocab_file""": { """bert_for_seq_generation""": ( """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model""" ), } } a_ : List[Any] = {"""bert_for_seq_generation""": 5_12} class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Optional[int] =VOCAB_FILES_NAMES lowercase : Optional[int] =PRETRAINED_VOCAB_FILES_MAP lowercase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase : List[int] =[] lowercase : str =['input_ids', 'attention_mask'] def __init__( self, lowerCAmelCase, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<::::>", lowerCAmelCase = None, **lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, pad_token=lowerCAmelCase, sep_token=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, ) lowerCamelCase_ =vocab_file lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return self.sp_model.get_piece_size() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ={self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): """simple docstring""" lowerCamelCase_ =self.__dict__.copy() lowerCamelCase_ =None return state def __setstate__( self, lowerCAmelCase ): """simple docstring""" 
lowerCamelCase_ =d # for backward compatibility if not hasattr(self, '''sp_model_kwargs''' ): lowerCamelCase_ ={} lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" return self.sp_model.piece_to_id(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.sp_model.IdToPiece(lowerCAmelCase ) return token def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =[] lowerCamelCase_ ='''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowerCAmelCase ) + token lowerCamelCase_ =[] else: current_sub_tokens.append(lowerCAmelCase ) out_string += self.sp_model.decode(lowerCAmelCase ) return out_string.strip() def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ): """simple docstring""" if not os.path.isdir(lowerCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCamelCase_ =os.path.join( lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file, lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(lowerCAmelCase, '''wb''' ) as fi: lowerCamelCase_ =self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase ) return (out_vocab_file,)
676
1
'''simple docstring''' from __future__ import annotations import math from collections.abc import Callable def a_ ( __snake_case : Callable[[int | float], int | float] , __snake_case : int | float , __snake_case : int | float , __snake_case : int = 100 , ) -> float: """simple docstring""" lowerCamelCase_ =x_start lowerCamelCase_ =fnc(__snake_case ) lowerCamelCase_ =0.0 for _ in range(__snake_case ): # Approximates curve as a sequence of linear lines and sums their length lowerCamelCase_ =(x_end - x_start) / steps + xa lowerCamelCase_ =fnc(__snake_case ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step lowerCamelCase_ =xa lowerCamelCase_ =fxa return length if __name__ == "__main__": def a_ ( __snake_case : Optional[Any] ) -> Optional[int]: """simple docstring""" return math.sin(10 * x ) print("""f(x) = sin(10 * x)""") print("""The length of the curve from x = -10 to x = 10 is:""") a_ : Tuple = 10 while i <= 10_00_00: print(F"""With {i} steps: {line_length(f, -10, 10, i)}""") i *= 10
676
'''simple docstring''' from collections.abc import Sequence def a_ ( __snake_case : Sequence[float] , __snake_case : float ) -> float: """simple docstring""" return sum(c * (x**i) for i, c in enumerate(__snake_case ) ) def a_ ( __snake_case : Sequence[float] , __snake_case : float ) -> float: """simple docstring""" lowerCamelCase_ =0.0 for coeff in reversed(__snake_case ): lowerCamelCase_ =result * x + coeff return result if __name__ == "__main__": a_ : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0) a_ : Tuple = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
676
1
'''simple docstring''' import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __UpperCamelCase : def __init__( self, lowerCAmelCase, lowerCAmelCase=14, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=99, lowerCAmelCase=32, lowerCAmelCase=4, lowerCAmelCase=4, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=0.0_2, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =seq_length lowerCamelCase_ =is_training lowerCamelCase_ =use_input_mask lowerCamelCase_ =use_token_type_ids lowerCamelCase_ =use_labels lowerCamelCase_ =vocab_size lowerCamelCase_ =hidden_size lowerCamelCase_ =rotary_dim lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_act lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =attention_probs_dropout_prob lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =initializer_range lowerCamelCase_ =None lowerCamelCase_ =vocab_size - 1 lowerCamelCase_ =vocab_size - 1 lowerCamelCase_ =vocab_size - 1 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], 
self.vocab_size ) lowerCamelCase_ =None if self.use_input_mask: lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ =GPTJConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=lowerCAmelCase, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, ) return (config, input_ids, input_mask) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =config_and_inputs lowerCamelCase_ ={'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =20 lowerCamelCase_ =model_class_name(lowerCAmelCase ) lowerCamelCase_ =model.init_cache(input_ids.shape[0], lowerCAmelCase ) lowerCamelCase_ =jnp.ones((input_ids.shape[0], max_decoder_length), dtype='''i4''' ) lowerCamelCase_ =jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) lowerCamelCase_ =model( input_ids[:, :-1], attention_mask=lowerCAmelCase, past_key_values=lowerCAmelCase, position_ids=lowerCAmelCase, ) lowerCamelCase_ =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='''i4''' ) lowerCamelCase_ =model( input_ids[:, -1:], attention_mask=lowerCAmelCase, past_key_values=outputs_cache.past_key_values, position_ids=lowerCAmelCase, ) lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''' ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =20 
lowerCamelCase_ =model_class_name(lowerCAmelCase ) lowerCamelCase_ =jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )], axis=-1, ) lowerCamelCase_ =model.init_cache(input_ids.shape[0], lowerCAmelCase ) lowerCamelCase_ =jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) ) lowerCamelCase_ =model( input_ids[:, :-1], attention_mask=lowerCAmelCase, past_key_values=lowerCAmelCase, position_ids=lowerCAmelCase, ) lowerCamelCase_ =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype='''i4''' ) lowerCamelCase_ =model( input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=lowerCAmelCase, position_ids=lowerCAmelCase, ) lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase ) lowerCamelCase_ =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3, msg=f'''Max diff is {diff}''' ) @require_flax class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : str =(FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () lowercase : Optional[Any] =(FlaxGPTJForCausalLM,) if is_flax_available() else () def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =FlaxGPTJModelTester(self ) def lowercase__ ( self ): """simple docstring""" for model_class_name in self.all_model_classes: lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" for model_class_name in self.all_model_classes: lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) @tooslow 
def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =GPTaTokenizer.from_pretrained('''gpt2''', pad_token='''<|endoftext|>''', padding_side='''left''' ) lowerCamelCase_ =tokenizer(['''Hello this is a long string''', '''Hey'''], return_tensors='''np''', padding=lowerCAmelCase, truncation=lowerCAmelCase ) lowerCamelCase_ =FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' ) lowerCamelCase_ =False lowerCamelCase_ =model.config.eos_token_id lowerCamelCase_ =jax.jit(model.generate ) lowerCamelCase_ =jit_generate( inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], pad_token_id=tokenizer.pad_token_id ).sequences lowerCamelCase_ =tokenizer.batch_decode(lowerCAmelCase, skip_special_tokens=lowerCAmelCase ) lowerCamelCase_ =[ '''Hello this is a long string of text.\n\nI\'m trying to get the text of the''', '''Hey, I\'m a little late to the party. I\'m going to''', ] self.assertListEqual(lowerCAmelCase, lowerCAmelCase ) @is_pt_flax_cross_test def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowerCamelCase_ =model_class.__name__[4:] # Skip the "Flax" at the beginning lowerCamelCase_ =getattr(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_, lowerCamelCase_ =pt_inputs['''input_ids'''].shape lowerCamelCase_ =np.random.randint(0, seq_length - 1, size=(batch_size,) ) for batch_idx, start_index in enumerate(lowerCAmelCase ): lowerCamelCase_ =0 lowerCamelCase_ =1 lowerCamelCase_ =0 lowerCamelCase_ =1 lowerCamelCase_ =pt_model_class(lowerCAmelCase ).eval() lowerCamelCase_ =model_class(lowerCAmelCase, dtype=jnp.floataa ) lowerCamelCase_ 
=convert_pytorch_state_dict_to_flax(pt_model.state_dict(), lowerCAmelCase ) lowerCamelCase_ =fx_state with torch.no_grad(): lowerCamelCase_ =pt_model(**lowerCAmelCase ).to_tuple() lowerCamelCase_ =fx_model(**lowerCAmelCase ).to_tuple() self.assertEqual(len(lowerCAmelCase ), len(lowerCAmelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(lowerCAmelCase, lowerCAmelCase ): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCAmelCase ) lowerCamelCase_ =model_class.from_pretrained(lowerCAmelCase, from_pt=lowerCAmelCase ) lowerCamelCase_ =fx_model_loaded(**lowerCAmelCase ).to_tuple() self.assertEqual( len(lowerCAmelCase ), len(lowerCAmelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output_loaded, pt_output in zip(lowerCAmelCase, lowerCAmelCase ): self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2 ) @is_pt_flax_cross_test def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowerCamelCase_ =model_class.__name__[4:] # Skip the "Flax" at the beginning lowerCamelCase_ =getattr(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =pt_model_class(lowerCAmelCase ).eval() lowerCamelCase_ =model_class(lowerCAmelCase, dtype=jnp.floataa ) lowerCamelCase_ =load_flax_weights_in_pytorch_model(lowerCAmelCase, fx_model.params ) lowerCamelCase_, lowerCamelCase_ =pt_inputs['''input_ids'''].shape lowerCamelCase_ =np.random.randint(0, seq_length - 1, size=(batch_size,) ) for batch_idx, start_index in 
enumerate(lowerCAmelCase ): lowerCamelCase_ =0 lowerCamelCase_ =1 lowerCamelCase_ =0 lowerCamelCase_ =1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): lowerCamelCase_ =pt_model(**lowerCAmelCase ).to_tuple() lowerCamelCase_ =fx_model(**lowerCAmelCase ).to_tuple() self.assertEqual(len(lowerCAmelCase ), len(lowerCAmelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(lowerCAmelCase, lowerCAmelCase ): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCAmelCase ) lowerCamelCase_ =pt_model_class.from_pretrained(lowerCAmelCase, from_flax=lowerCAmelCase ) with torch.no_grad(): lowerCamelCase_ =pt_model_loaded(**lowerCAmelCase ).to_tuple() self.assertEqual( len(lowerCAmelCase ), len(lowerCAmelCase ), '''Output lengths differ between Flax and PyTorch''' ) for fx_output, pt_output in zip(lowerCAmelCase, lowerCAmelCase ): self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2 ) @tooslow def lowercase__ ( self ): """simple docstring""" for model_class_name in self.all_model_classes: lowerCamelCase_ =model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' ) lowerCamelCase_ =model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCAmelCase )
676
'''Processor that pairs a CLIP image processor with an XLM-RoBERTa tokenizer
(AltCLIP-style): one callable that tokenizes text and/or preprocesses images.'''
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class __UpperCamelCase(ProcessorMixin):
    # ProcessorMixin contract: `attributes` lists the sub-processors that
    # `super().__init__` stores/saves, and the `*_class` names declare which
    # classes `from_pretrained` should instantiate for each of them.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Wrap an image processor and a tokenizer; both are required.

        ``feature_extractor`` is accepted as a deprecated alias for
        ``image_processor`` (scheduled for removal in v5).
        """
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # The deprecated alias only fills in when no image_processor was given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or preprocess ``images``.

        Returns a BatchEncoding; when both inputs are given, the image
        features are merged into the text encoding under ``pixel_values``.
        Raises ValueError when neither input is provided.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both sub-processors' input names, deduplicated in order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
676
1
'''Best-first grid search (A*-style with a user-supplied heuristic) that
returns the path from a start cell to a goal cell plus the action map.'''
from __future__ import annotations

# Candidate moves; the index into this list is what gets stored in `action`.
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """Search ``grid`` (0 = free, 1 = obstacle) from ``init`` to ``goal``.

    Expands the open cell with the lowest f = g + heuristic, recording for
    every reached cell which move index got there, then walks the action map
    backwards from the goal to reconstruct the path.

    Returns:
        (path, action): ``path`` is the list of [row, col] cells from init to
        goal inclusive; ``action`` holds the DIRECTIONS index used to enter
        each cell.

    Raises:
        ValueError: when the goal is unreachable (open list exhausted).
    """
    # closed[r][c] == 1 once a cell has been queued, so it is never re-expanded.
    closed = [[0 for col in range(len(grid[0]))] for row in range(len(grid))]
    closed[init[0]][init[1]] = 1
    # action[r][c] = index into DIRECTIONS of the move that reached (r, c).
    action = [[0 for col in range(len(grid[0]))] for row in range(len(grid))]

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost-so-far plus estimated cost to the goal
    cell = [[f, g, x, y]]  # open list

    found = False  # set when the goal cell is expanded
    resign = False  # would signal "no expandable cells left"

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        # Pop the entry with the smallest f (sort ascending, pop from the end
        # after reversing — equivalent to a min-heap pop).
        cell.sort()
        cell.reverse()
        next_cell = cell.pop()
        x = next_cell[2]
        y = next_cell[3]
        g = next_cell[1]

        if x == goal[0] and y == goal[1]:
            found = True
        else:
            for i in range(len(DIRECTIONS)):  # try every valid move
                x2 = x + DIRECTIONS[i][0]
                y2 = y + DIRECTIONS[i][1]
                if 0 <= x2 < len(grid) and 0 <= y2 < len(grid[0]):
                    if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                        g2 = g + cost
                        f2 = g2 + heuristic[x2][y2]
                        cell.append([f2, g2, x2, y2])
                        closed[x2][y2] = 1
                        action[x2][y2] = i

    # Walk backwards from the goal along the recorded actions.
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    # Reverse to get init -> goal order.
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]  # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # Manhattan-distance heuristic, pushing the path toward the goal.
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # extra penalty in the heuristic map for obstacle cells
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
676
'''Image-to-text pipeline: given an image (optionally with a text prompt for
conditional generation), generate a caption with a vision-to-sequence model.'''
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class __UpperCamelCase(Pipeline):
    """Pipeline mapping an image (plus optional prompt) to generated text.

    Implements the four `Pipeline` hooks: `_sanitize_parameters`,
    `preprocess`, `_forward`, `postprocess`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        # Restrict to vision-to-sequence model classes for the active framework.
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        """Split call-time kwargs into (preprocess, forward, postprocess) dicts."""
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        """Caption one image or a batch of images; see `Pipeline.__call__`."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        """Load the image and build model inputs, handling per-architecture
        conditional-generation quirks (git / pix2struct / encoder-decoder)."""
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                # git expects a leading CLS token on the prompt ids.
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Batched unconditional git inputs arrive as a list of Nones — collapse
        # it back to a single None so `generate` treats it as "no prompt".
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        """Decode every generated sequence into `{'generated_text': ...}`."""
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
676
1
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
676
'''simple docstring''' import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def a_ ( __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Any ) -> str: """simple docstring""" # Initialise PyTorch model lowerCamelCase_ =BertConfig.from_json_file(__snake_case ) print(F'''Building PyTorch model from configuration: {config}''' ) lowerCamelCase_ =BertForPreTraining(__snake_case ) # Load weights from tf checkpoint load_tf_weights_in_bert(__snake_case , __snake_case , __snake_case ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , __snake_case ) if __name__ == "__main__": a_ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--bert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained BERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) a_ : Optional[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
676
1
'''simple docstring''' def a_ ( __snake_case : int , __snake_case : int ) -> str: """simple docstring""" if not isinstance(__snake_case , __snake_case ): raise ValueError('''iterations must be defined as integers''' ) if not isinstance(__snake_case , __snake_case ) or not number >= 1: raise ValueError( '''starting number must be and integer and be more than 0''' ) if not iterations >= 1: raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' ) lowerCamelCase_ ='''''' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__snake_case ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
676
'''AltCLIP model configuration: text tower, vision tower, and combined config.'''
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config file.
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the XLM-R-based text encoder of AltCLIP."""

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        # Dimension of the projection used to match the vision tower.
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the CLIP-style vision encoder of AltCLIP."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the vision sub-config, unwrapping it when the checkpoint is a
        full composite "altclip" config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    """Composite configuration holding one text and one vision sub-config."""

    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize, expanding the nested sub-configs into plain dicts."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
1
'''simple docstring''' def a_ ( __snake_case : int ) -> bool: """simple docstring""" lowerCamelCase_ =n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
676
'''simple docstring''' import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=2, lowerCAmelCase=99, lowerCAmelCase=0, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase="last", lowerCAmelCase=None, lowerCAmelCase=None, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =seq_length lowerCamelCase_ =is_training lowerCamelCase_ =use_input_lengths lowerCamelCase_ =use_token_type_ids lowerCamelCase_ =use_labels lowerCamelCase_ =gelu_activation lowerCamelCase_ =sinusoidal_embeddings lowerCamelCase_ =causal lowerCamelCase_ =asm lowerCamelCase_ =n_langs lowerCamelCase_ =vocab_size lowerCamelCase_ =n_special lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ 
=attention_probs_dropout_prob lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =type_vocab_size lowerCamelCase_ =type_sequence_label_size lowerCamelCase_ =initializer_range lowerCamelCase_ =num_labels lowerCamelCase_ =num_choices lowerCamelCase_ =summary_type lowerCamelCase_ =use_proj lowerCamelCase_ =scope def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ =None if self.use_input_lengths: lowerCamelCase_ =( ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCamelCase_ =None if self.use_token_type_ids: lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.n_langs ) lowerCamelCase_ =None lowerCamelCase_ =None lowerCamelCase_ =None if self.use_labels: lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size ) lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels ) lowerCamelCase_ =ids_tensor([self.batch_size], 2 ).float() lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices ) lowerCamelCase_ =self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowercase__ ( self ): """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, ) def lowercase__ ( self, 
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, lengths=lowerCAmelCase, langs=lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, langs=lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertWithLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForQuestionAnswering(lowerCAmelCase ) model.to(lowerCAmelCase ) 
model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model( lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, p_mask=lowerCAmelCase, ) lowerCamelCase_ =model( lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, ) ((lowerCamelCase_), ) =result_with_labels.to_tuple() lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase ) ((lowerCamelCase_), ) =result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, () ) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =self.num_labels lowerCamelCase_ 
=FlaubertForTokenClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =self.num_choices lowerCamelCase_ =FlaubertForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =model( lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ) =config_and_inputs lowerCamelCase_ ={ '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : List[Any] =( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) lowercase : Tuple =( { 'feature-extraction': 
FlaubertModel, 'fill-mask': FlaubertWithLMHeadModel, 'question-answering': FlaubertForQuestionAnsweringSimple, 'text-classification': FlaubertForSequenceClassification, 'token-classification': FlaubertForTokenClassification, 'zero-shot': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ): """simple docstring""" lowerCamelCase_ =super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowerCamelCase_ =torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase ) lowerCamelCase_ =torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase ) return inputs_dict def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =FlaubertModelTester(self ) lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, emb_dim=37 ) def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase ) def lowercase__ ( self 
): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =FlaubertModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @slow @require_torch_gpu def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return lowerCamelCase_ =True lowerCamelCase_ =model_class(config=lowerCAmelCase ) lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =torch.jit.trace( lowerCAmelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, '''traced_model.pt''' ) ) lowerCamelCase_ =torch.jit.load(os.path.join(lowerCAmelCase, '''traced_model.pt''' ), map_location=lowerCAmelCase ) loaded(inputs_dict['''input_ids'''].to(lowerCAmelCase ), inputs_dict['''attention_mask'''].to(lowerCAmelCase ) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' ) lowerCamelCase_ =torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase )[0] lowerCamelCase_ =torch.Size((1, 11, 768) ) self.assertEqual(output.shape, lowerCAmelCase ) lowerCamelCase_ =torch.tensor( [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=1e-4 ) )
676
1
"""Tests for TF generation utilities: top-k/top-p filtering, SavedModel export of
`generate()`, in-graph tokenization, eos handling, and model-kwarg filtering."""
from __future__ import annotations

import os
import tempfile
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow

from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )

if is_tensorflow_text_available():
    import tensorflow_text as text


@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                [
                    8.2220991,  # 3rd highest value; idx. 0
                    -0.5620044,
                    5.23229752,
                    4.0386393,
                    -6.8798378,
                    -0.54785802,
                    -3.2012153,
                    2.92777176,
                    1.88171953,
                    7.35341276,  # 5th highest value; idx. 9
                    8.43207833,  # 2nd highest value; idx. 10
                    -9.85711836,
                    -5.96209236,
                    -1.13039161,
                    -7.1115294,
                    -0.8369633,
                    -5.3186408,
                    7.06427407,
                    0.81369344,
                    -0.82023817,
                    -5.9179796,
                    0.58813443,
                    -6.99778438,
                    4.71551189,
                    -0.18771637,
                    7.44020759,  # 4th highest value; idx. 25
                    9.38450987,  # 1st highest value; idx. 26
                    2.12662941,
                    -9.32562038,
                    2.35652522,
                ],  # cummulative prob of 5 highest values <= 0.6
                [
                    0.58425518,
                    4.53139238,
                    -5.57510464,
                    -6.28030699,
                    -7.19529503,
                    -4.02122551,
                    1.39337037,
                    -6.06707057,
                    1.59480517,
                    -9.643119,
                    0.03907799,
                    0.67231762,
                    -8.88206726,
                    6.27115922,  # 4th highest value; idx. 13
                    2.28520723,
                    4.82767506,
                    4.30421368,
                    8.8275313,  # 2nd highest value; idx. 17
                    5.44029958,  # 5th highest value; idx. 18
                    -4.4735794,
                    7.38579536,  # 3rd highest value; idx. 20
                    -2.91051663,
                    2.61946077,
                    -2.5674762,
                    -9.48959302,
                    -4.02922645,
                    -1.35416918,
                    9.67702323,  # 1st highest value; idx. 27
                    -5.89478553,
                    1.85370467,
                ],  # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above

        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)

        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)


@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }

    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        """Exports `generate()` as a SavedModel signature with a fixed input length
        and checks the exported function matches eager `generate()` output."""
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            # batch size is variable in the signature, so exercise several batch sizes
            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        """Same export test as above, but with a fixed batch size and variable
        sequence length in the serving signature."""
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]
            # sequence length is variable in the signature, so exercise each row separately
            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)

    @slow
    @require_tensorflow_text
    def test_generate_tf_with_tf_tokenizer(self):
        """Builds a Keras model that tokenizes, generates and detokenizes entirely
        in-graph, and checks it can be saved."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)

    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        """Checks that `eos_token_id` works both as an int and as a list with
        sampling, producing the same (seeded) stopping point."""
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        sentence = "Hello, my dog is cute and"
        tokens = tokenizer(sentence, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

    def test_model_kwarg_encoder_signature_filtering(self):
        """Checks that model kwargs are filtered against the encoder signature:
        extra kwargs are accepted when the model declares them, and rejected when
        the encoder only takes **kwargs (no filtering possible)."""
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A fake model whose signature accepts "foo" as an extra argument
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
676
"""Utilities to convert checkpoints between PyTorch and Flax Transformers models."""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict

import transformers

from .utils import logging


logger = logging.get_logger(__name__)


def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load a PyTorch checkpoint (single file or sharded) into a Flax state dict.

    Args:
        flax_model: the target Flax model (provides params / base_model_prefix).
        pytorch_checkpoint_path: path to the `.bin` file, or the list of shard files
            when `is_sharded` is True.
        is_sharded: whether the checkpoint is split into shards.
        allow_missing_keys: kept for interface compatibility; unused here.

    Returns:
        A nested dict of Flax parameters.

    Raises:
        ImportError: if PyTorch is not installed.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict


def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename a PT weight key to its Flax equivalent and reshape the tensor if needed.

    Handles layer norm, batch norm stats, embeddings, conv kernels (NCHW -> HWIO),
    linear kernels (transposed), legacy gamma/beta names, and `weight_norm`
    parametrizations. Returns the (possibly renamed) key and tensor unchanged when
    no rule matches.
    """

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks whether `key` or `(model_prefix,) + key` exists in the random Flax state dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PT (out, in, kh, kw) -> Flax (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: Flax kernels are transposed relative to PT weights
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    """Convert a (non-sharded) PyTorch state dict into an unflattened Flax params dict."""
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params["params"]
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params["batch_stats"])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split("."))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)


def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    """Convert a sharded PyTorch checkpoint (list of shard files) into a Flax params dict."""
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params["params"]

            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"]))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split(".")[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split(".")[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split("."))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                        f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[("batch_stats",) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[("params",) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)


def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load a serialized Flax checkpoint file into a PyTorch model."""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f"Loading Flax weights from {flax_checkpoint_path}")

    # import correct flax class
    flax_cls = getattr(transformers, "Flax" + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, "rb") as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights (a nested params dict) into `pt_model` in place.

    Returns the PyTorch model; logs warnings for unexpected and missing keys.

    Raises:
        ImportError: if PyTorch is not installed.
        ValueError: if a weight's shape does not match the PyTorch parameter.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split(".")[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split(".")[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
    # Hoisted out of the per-weight loop: this mapping depends only on `pt_model_dict`.
    special_pt_names = {}
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    for key in pt_model_dict:
        key_components = key.split(".")
        name = None
        if key_components[-3::2] == ["parametrizations", "original0"]:
            name = key_components[-2] + "_g"
        elif key_components[-3::2] == ["parametrizations", "original1"]:
            name = key_components[-2] + "_v"
        if name is not None:
            key_components = key_components[:-3] + [name]
            key_to_check = ".".join(key_components)
            special_pt_names[key_to_check] = key

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_mean",)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ("running_var",)

        if "batch_stats" in flax_state:
            flax_key = ".".join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = ".".join(flax_key_tuple)

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    else:
        logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )
    else:
        logger.warning(
            f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
            "If your task is similar to the task the model of the checkpoint was trained on, "
            f"you can already use {pt_model.__class__.__name__} for predictions without further training."
        )

    return pt_model


# Backward-compatible alias: the obfuscated file bound every function to `a_`, so the
# module-level name `a_` ended up referring to the last definition.
a_ = load_flax_weights_in_pytorch_model
676
1
"""Convert model weights between PyTorch and Flax checkpoint formats.

NOTE(review): the original block was machine-obfuscated — every function was
named ``a_`` (so internal cross-calls were unresolvable) and every parameter
was ``__snake_case`` (duplicate argument names are a SyntaxError).  This
restores the names and bindings that the visible statement sequence encodes.
"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict

import transformers

from .utils import logging


logger = logging.get_logger(__name__)


def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load a PyTorch checkpoint (single file or sharded) into a Flax state dict.

    Args:
        flax_model: target Flax model (provides params / base_model_prefix).
        pytorch_checkpoint_path: path to the ``.bin`` file, or the list of
            shard files when ``is_sharded`` is True.
        is_sharded: whether the checkpoint is split across several shards.
        allow_missing_keys: kept for API compatibility; not used here.

    Raises:
        ImportError: when PyTorch is not installed.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.'''
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f'''Loading PyTorch weights from {pt_path}''')
        pt_state_dict = torch.load(pt_path, map_location='''cpu''')
        logger.info(f'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''')
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict


def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename a PT weight key to its Flax equivalent and reshape if necessary."""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        # True when `key` or `(model_prefix,) + key` exists in the Flax params.
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''mean''',)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''var''',)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''embedding''',)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PT (out, in, kh, kw) -> Flax (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: PT stores (out, in), Flax (in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + '''_g'''
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + '''_v'''
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
    """Convert a (non-sharded) PyTorch state dict to a nested Flax params dict."""
    # convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    model_prefix = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        flax_model_params = flax_model.params['''params''']
    else:
        flax_model_params = flax_model.params
    random_flax_state_dict = flatten_dict(flax_model_params)

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        flax_batch_stats = flatten_dict(flax_model.params['''batch_stats'''])
        random_flax_state_dict.update(flax_batch_stats)

    flax_state_dict = {}

    load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split('''.''')[0] for k in pt_state_dict.keys()}
    )
    load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split('''.''')[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        pt_tuple_key = tuple(pt_key.split('''.'''))

        # remove base model prefix if necessary
        has_base_model_prefix = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            pt_tuple_key = pt_tuple_key[1:]

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(
            pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
        )

        # add model prefix if necessary
        require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.'''
                )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                flax_state_dict[('''batch_stats''',) + flax_key] = jnp.asarray(flax_tensor)
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(flax_key, None)
                continue

            # also add unexpected weight so that warning is thrown
            flax_state_dict[('''params''',) + flax_key] = jnp.asarray(flax_tensor)

        else:
            # also add unexpected weight so that warning is thrown
            flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)


def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
    """Convert a sharded PyTorch checkpoint (list of shard files) to Flax params."""
    import torch

    # Load the index
    flax_state_dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        pt_state_dict = torch.load(shard_file)
        pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

        model_prefix = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            flax_model_params = flax_model.params['''params''']
            random_flax_state_dict = flatten_dict(flax_model_params)
            random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats''']))
        else:
            flax_model_params = flax_model.params
            random_flax_state_dict = flatten_dict(flax_model_params)

        load_model_with_head_into_base_model = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split('''.''')[0] for k in pt_state_dict.keys()}
        )
        load_base_model_into_model_with_head = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split('''.''')[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            pt_tuple_key = tuple(pt_key.split('''.'''))

            # remove base model prefix if necessary
            has_base_model_prefix = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                pt_tuple_key = pt_tuple_key[1:]

            # Correctly rename weight parameters
            flax_key, flax_tensor = rename_key_and_reshape_tensor(
                pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix
            )
            # add model prefix if necessary
            require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                flax_key = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                        f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.'''
                    )

            # add batch stats if the model contains batchnorm layers
            if "batch_stats" in flax_model.params:
                if "mean" in flax_key[-1]:
                    flax_state_dict[('''batch_stats''',) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                if "var" in flax_key[-1]:
                    flax_state_dict[('''batch_stats''',) + flax_key] = jnp.asarray(flax_tensor)
                    continue
                # remove num_batches_tracked key
                if "num_batches_tracked" in flax_key[-1]:
                    flax_state_dict.pop(flax_key, None)
                    continue

                # also add unexpected weight so that warning is thrown
                flax_state_dict[('''params''',) + flax_key] = jnp.asarray(flax_tensor)
            else:
                # also add unexpected weight so that warning is thrown
                flax_state_dict[flax_key] = jnp.asarray(flax_tensor)
    return unflatten_dict(flax_state_dict)


def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
    """Load a serialized Flax checkpoint file into a PyTorch model."""
    flax_checkpoint_path = os.path.abspath(flax_checkpoint_path)
    logger.info(f'''Loading Flax weights from {flax_checkpoint_path}''')

    # import correct flax class
    flax_cls = getattr(transformers, '''Flax''' + model.__class__.__name__)

    # load flax weight dict
    with open(flax_checkpoint_path, '''rb''') as state_f:
        try:
            flax_state_dict = from_bytes(flax_cls, state_f.read())
        except UnpicklingError:
            raise EnvironmentError(f'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''')

    return load_flax_weights_in_pytorch_model(model, flax_state_dict)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load an in-memory Flax state tree into a PyTorch model, returning the model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.'''
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.'''
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    flax_state_dict = flatten_dict(flax_state)
    pt_model_dict = pt_model.state_dict()

    load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split('''.''')[0] for k in pt_model_dict.keys()}
    )
    load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split('''.''')[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030.
    # This map depends only on the fixed key set of `pt_model_dict`, so it is
    # computed once here instead of once per flax weight (perf: hoisted out of
    # the loop below).
    special_pt_names = {}
    for key in pt_model_dict:
        key_components = key.split('''.''')
        name = None
        if key_components[-3::2] == ["parametrizations", "original0"]:
            name = key_components[-2] + '''_g'''
        elif key_components[-3::2] == ["parametrizations", "original1"]:
            name = key_components[-2] + '''_v'''
        if name is not None:
            key_components = key_components[:-3] + [name]
            key_to_check = '''.'''.join(key_components)
            special_pt_names[key_to_check] = key

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
        require_base_model_prefix = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            flax_key_tuple = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
            # conv layer
            flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
            # linear layer
            flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
            flax_tensor = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ('''running_mean''',)
        elif "var" in flax_key_tuple[-1]:
            flax_key_tuple = flax_key_tuple[:-1] + ('''running_var''',)

        if "batch_stats" in flax_state:
            flax_key = '''.'''.join(flax_key_tuple[1:])  # Remove the params/batch_stats header
        else:
            flax_key = '''.'''.join(flax_key_tuple)

        if flax_key in special_pt_names:
            flax_key = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
                    f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.'''
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            '''Some weights of the Flax model were not used when initializing the PyTorch model'''
            f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
            f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
            ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
            f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
            ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
            ''' FlaxBertForSequenceClassification model).'''
        )
    else:
        logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''')

    if len(missing_keys) > 0:
        logger.warning(
            f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
            f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
            ''' use it for predictions and inference.'''
        )
    else:
        logger.warning(
            f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
            '''If your task is similar to the task the model of the checkpoint was trained on, '''
            f'''you can already use {pt_model.__class__.__name__} for predictions without further training.'''
        )

    return pt_model
676
'''simple docstring''' def a_ ( __snake_case : str , __snake_case : str ) -> str: """simple docstring""" lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =( first_str_length if first_str_length > second_str_length else second_str_length ) lowerCamelCase_ =[] for char_count in range(__snake_case ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(__snake_case ) if __name__ == "__main__": print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
676
1
"""Integration test for `datasets`' CLI ``TestCommand``.

NOTE(review): restored from machine-obfuscated code (duplicate
``__snake_case`` parameters were a SyntaxError, and the final comparison
was a no-op missing its ``assert``).
"""
import os
from collections import namedtuple

import pytest

from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict

# Lightweight stand-in for the argparse namespace that TestCommand expects.
_TestCommandArgs = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)


def is_1percent_close(source: float, target: float) -> bool:
    """Return True when *source* is within 1% relative error of *target*."""
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    # Run the `datasets-cli test` command with --all_configs --save_infos on the
    # fixture dataset script, then verify the README / dataset_infos it writes.
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = (
            getattr(dataset_infos["default"], key),
            getattr(expected_dataset_infos["default"], key),
        )
        if key == "num_bytes":
            # byte counts may drift slightly between versions; allow 1% slack
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            # BUG FIX: the original evaluated `result == expected` without asserting it
            assert result == expected
676
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ : Any = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[Any] = ["""TimmBackbone"""] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
676
1
"""Tests for the AltDiffusion img2img pipeline.

NOTE(review): restored from machine-obfuscated code — every method was named
``lowercase__`` (so later definitions shadowed earlier ones and pytest could
collect nothing) and digits in imported class names were mangled
(``AltDiffusionImgaImgPipeline``).  Boolean/None keyword values that the
obfuscation collapsed to ``lowerCAmelCase`` were reconstructed
(``safety_checker=None``, ``do_normalize=False``, ``disable=None``) — confirm
against upstream diffusers if exactness matters.
"""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import XLMRobertaTokenizer

from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        # Deterministic 1x3x32x32 float image tensor on the test device.
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        # Minimal stand-in for a feature extractor: returns an object with an
        # empty `pixel_values` tensor and a chainable `.to(device)`.
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
676
'''simple docstring''' import functools def a_ ( __snake_case : str , __snake_case : str ) -> int: """simple docstring""" lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =len(__snake_case ) @functools.cache def min_distance(__snake_case : int , __snake_case : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa lowerCamelCase_ =int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , __snake_case ) , 1 + min_distance(__snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
676
1
'''simple docstring''' def a_ ( __snake_case : int , __snake_case : list[int] , __snake_case : int ) -> int: """simple docstring""" def count_of_possible_combinations(__snake_case : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__snake_case ) def a_ ( __snake_case : int , __snake_case : list[int] , __snake_case : int ) -> int: """simple docstring""" def count_of_possible_combinations_with_dp_array( __snake_case : int , __snake_case : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowerCamelCase_ =sum( count_of_possible_combinations_with_dp_array(target - item , __snake_case ) for item in array ) lowerCamelCase_ =answer return answer lowerCamelCase_ =[-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__snake_case , __snake_case ) def a_ ( __snake_case : int , __snake_case : list[int] , __snake_case : int ) -> int: """simple docstring""" lowerCamelCase_ =[0] * (target + 1) lowerCamelCase_ =1 for i in range(1 , target + 1 ): for j in range(__snake_case ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() a_ : Tuple = 3 a_ : int = 5 a_ : List[str] = [1, 2, 5] print(combination_sum_iv(n, array, target))
676
'''simple docstring''' def a_ ( __snake_case : int ) -> bool: """simple docstring""" if not isinstance(__snake_case , __snake_case ): lowerCamelCase_ =F'''Input value of [number={number}] must be an integer''' raise TypeError(__snake_case ) if number < 0: return False lowerCamelCase_ =number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
676
1
"""Tokenization tests for LXMERT (WordPiece, shared with BERT vocab files).

NOTE(review): restored from machine-obfuscated code in which every method was
named ``lowercase__`` (so ``setUp`` never ran and pytest collected no tests)
and the class attributes all shadowed each other under the name ``lowercase``.
"""
import os
import unittest

from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # Tiny WordPiece vocab sufficient to tokenize the fixtures below.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
676
'''Backtracking sudoku solver for standard 9x9 grids.'''
from __future__ import annotations

# Bug fix: the original named every function and constant ``a_`` (each
# definition shadowing the previous one) while the bodies and the __main__
# block called ``is_safe`` / ``find_empty_location`` / ``sudoku`` /
# ``print_solution`` and read ``initial_grid`` / ``no_solution`` /
# ``solution`` — all undefined, so the file crashed with NameError.
# The names the code itself calls are restored below.
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` may legally be placed at grid[row][column]."""
    # Row and column constraint.
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    # 3x3 sub-box constraint; (row - row % 3, column - column % 3) is the
    # top-left corner of the box containing the cell.
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return (row, column) of the first empty cell (value 0), or None."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking.

    Returns the (same, mutated) grid when a solution is found, or None when
    the grid is unsolvable.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Undo the tentative placement before trying the next digit.
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid one row per line, cells separated by spaces."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
676
1
'''Convert a TensorFlow "Token Dropping" BERT checkpoint to PyTorch.'''
import argparse

import tensorflow as tf
import torch

from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertPooler,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


logging.set_verbosity_info()


# NOTE(review): identifiers here look machine-mangled — values are assigned to
# ``lowerCamelCase_`` but read back under names such as ``name`` / ``array`` /
# ``config`` / ``model``, and __main__ calls ``convert_checkpoint_to_pytorch``
# which is never defined. Verify against the upstream transformers script.
def a_ ( __snake_case : str , __snake_case : str , __snake_case : str ) -> Optional[Any]:
    """Load TF checkpoint variables and copy them into a BertForMaskedLM.

    Args: checkpoint path, BERT config JSON path, PyTorch output dir.
    """

    def get_masked_lm_array(__snake_case : str ):
        # Fetch one masked-LM head variable; kernels are transposed because
        # TF stores dense weights as (in, out) while PyTorch uses (out, in).
        lowerCamelCase_ =F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        lowerCamelCase_ =tf.train.load_variable(__snake_case , __snake_case )
        if "kernel" in name:
            lowerCamelCase_ =array.transpose()
        return torch.from_numpy(__snake_case )

    def get_encoder_array(__snake_case : str ):
        # Fetch an encoder-level (non per-layer) variable.
        lowerCamelCase_ =F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        lowerCamelCase_ =tf.train.load_variable(__snake_case , __snake_case )
        if "kernel" in name:
            lowerCamelCase_ =array.transpose()
        return torch.from_numpy(__snake_case )

    def get_encoder_layer_array(__snake_case : int , __snake_case : str ):
        # Fetch a per-transformer-layer variable by layer index.
        lowerCamelCase_ =F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        lowerCamelCase_ =tf.train.load_variable(__snake_case , __snake_case )
        if "kernel" in name:
            lowerCamelCase_ =array.transpose()
        return torch.from_numpy(__snake_case )

    def get_encoder_attention_layer_array(__snake_case : int , __snake_case : str , __snake_case : Optional[int] ):
        # Fetch an attention variable and reshape it to the PyTorch layout
        # (TF keeps per-head dimensions split; PyTorch fuses them).
        lowerCamelCase_ =F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
        lowerCamelCase_ =tf.train.load_variable(__snake_case , __snake_case )
        lowerCamelCase_ =array.reshape(__snake_case )
        if "kernel" in name:
            lowerCamelCase_ =array.transpose()
        return torch.from_numpy(__snake_case )

    print(F'''Loading model based on config from {config_path}...''' )
    lowerCamelCase_ =BertConfig.from_json_file(__snake_case )
    lowerCamelCase_ =BertForMaskedLM(__snake_case )

    # Layers: copy every transformer layer's attention / FFN / layer-norm
    # parameters from the TF checkpoint into the freshly built model.
    for layer_index in range(0 , config.num_hidden_layers ):
        lowerCamelCase_ =model.bert.encoder.layer[layer_index]

        # Self-attention (query / key / value projections)
        lowerCamelCase_ =layer.attention.self
        lowerCamelCase_ =get_encoder_attention_layer_array(
            __snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape )
        lowerCamelCase_ =get_encoder_attention_layer_array(
            __snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape )
        lowerCamelCase_ =get_encoder_attention_layer_array(
            __snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape )
        lowerCamelCase_ =get_encoder_attention_layer_array(
            __snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape )
        lowerCamelCase_ =get_encoder_attention_layer_array(
            __snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape )
        lowerCamelCase_ =get_encoder_attention_layer_array(
            __snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape )

        # Self-attention output projection + its layer norm
        lowerCamelCase_ =layer.attention.output
        lowerCamelCase_ =get_encoder_attention_layer_array(
            __snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape )
        lowerCamelCase_ =get_encoder_attention_layer_array(
            __snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape )
        lowerCamelCase_ =get_encoder_layer_array(__snake_case , '''_attention_layer_norm/gamma''' )
        lowerCamelCase_ =get_encoder_layer_array(__snake_case , '''_attention_layer_norm/beta''' )

        # Intermediate (FFN up-projection)
        lowerCamelCase_ =layer.intermediate
        lowerCamelCase_ =get_encoder_layer_array(__snake_case , '''_intermediate_dense/kernel''' )
        lowerCamelCase_ =get_encoder_layer_array(__snake_case , '''_intermediate_dense/bias''' )

        # Output (FFN down-projection) + its layer norm
        lowerCamelCase_ =layer.output
        lowerCamelCase_ =get_encoder_layer_array(__snake_case , '''_output_dense/kernel''' )
        lowerCamelCase_ =get_encoder_layer_array(__snake_case , '''_output_dense/bias''' )
        lowerCamelCase_ =get_encoder_layer_array(__snake_case , '''_output_layer_norm/gamma''' )
        lowerCamelCase_ =get_encoder_layer_array(__snake_case , '''_output_layer_norm/beta''' )

    # Embeddings (position / token-type / embedding layer norm)
    lowerCamelCase_ =get_encoder_array('''_position_embedding_layer/embeddings''' )
    lowerCamelCase_ =get_encoder_array('''_type_embedding_layer/embeddings''' )
    lowerCamelCase_ =get_encoder_array('''_embedding_norm_layer/gamma''' )
    lowerCamelCase_ =get_encoder_array('''_embedding_norm_layer/beta''' )

    # LM Head (dense transform, layer norm, tied embedding table)
    lowerCamelCase_ =model.cls.predictions.transform
    lowerCamelCase_ =get_masked_lm_array('''dense/kernel''' )
    lowerCamelCase_ =get_masked_lm_array('''dense/bias''' )
    lowerCamelCase_ =get_masked_lm_array('''layer_norm/gamma''' )
    lowerCamelCase_ =get_masked_lm_array('''layer_norm/beta''' )
    lowerCamelCase_ =get_masked_lm_array('''embedding_table''' )

    # Pooling layer (built fresh, weights loaded from the encoder scope)
    lowerCamelCase_ =BertPooler(config=__snake_case )
    lowerCamelCase_ =get_encoder_array('''_pooler_layer/kernel''' )
    lowerCamelCase_ =get_encoder_array('''_pooler_layer/bias''' )

    # Export final model
    model.save_pretrained(__snake_case )

    # Integration test - should load without any errors ;)
    lowerCamelCase_ =BertForMaskedLM.from_pretrained(__snake_case )
    print(new_model.eval() )
    print('''Model conversion was done sucessfully!''' )


if __name__ == "__main__":
    a_ : int = argparse.ArgumentParser()
    parser.add_argument(
        """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
    )
    parser.add_argument(
        """--bert_config_file""",
        type=str,
        required=True,
        help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""",
        type=str,
        required=True,
        help="""Path to the output PyTorch model.""",
    )
    a_ : Dict = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
676
'''Configuration class for the Informer time-series transformer model.'''
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a_ : Union[str, Any] = logging.get_logger(__name__)

a_ : Tuple = {
    """huggingface/informer-tourism-monthly""": (
        """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


# NOTE(review): assignments go to ``lowerCamelCase_`` but are read back via
# ``self.<real name>`` (e.g. self.cardinality, self.lags_sequence) — the
# identifiers appear machine-mangled; verify against upstream transformers.
class __UpperCamelCase ( lowerCamelCase__ ):
    # Model type tag and mapping from common config keys to Informer names.
    lowercase : Union[str, Any] ='informer'
    lowercase : Union[str, Any] ={
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__( self, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "student_t", lowerCAmelCase = "nll", lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = "mean", lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = 0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = 64, lowerCAmelCase = 32, lowerCAmelCase = 32, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = 2, lowerCAmelCase = True, lowerCAmelCase = "gelu", lowerCAmelCase = 0.0_5, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 0.1, lowerCAmelCase = 100, lowerCAmelCase = 0.0_2, lowerCAmelCase=True, lowerCAmelCase = "prob", lowerCAmelCase = 5, lowerCAmelCase = True, **lowerCAmelCase, ):
        """Build an Informer config.

        Covers time-series settings (prediction/context length, lags,
        scaling, static/dynamic features), the transformer architecture
        (d_model, layers, heads, dropouts), and Informer-specific knobs
        (attention_type, sampling_factor, distil).
        """
        # --- time series specific configuration ---
        lowerCamelCase_ =prediction_length
        # context_length falls back to prediction_length when not given
        lowerCamelCase_ =context_length or prediction_length
        lowerCamelCase_ =distribution_output
        lowerCamelCase_ =loss
        lowerCamelCase_ =input_size
        lowerCamelCase_ =num_time_features
        # default lags: one week of daily history
        lowerCamelCase_ =lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        lowerCamelCase_ =scaling
        lowerCamelCase_ =num_dynamic_real_features
        lowerCamelCase_ =num_static_real_features
        lowerCamelCase_ =num_static_categorical_features
        # set cardinality — one entry per static categorical feature
        if cardinality and num_static_categorical_features > 0:
            if len(lowerCAmelCase ) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
            lowerCamelCase_ =cardinality
        else:
            lowerCamelCase_ =[0]
        # set embedding_dimension — defaults derived from the cardinalities
        if embedding_dimension and num_static_categorical_features > 0:
            if len(lowerCAmelCase ) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
            lowerCamelCase_ =embedding_dimension
        else:
            lowerCamelCase_ =[min(50, (cat + 1) // 2 ) for cat in self.cardinality]
        lowerCamelCase_ =num_parallel_samples

        # --- Transformer architecture configuration ---
        # feature_size = lagged values of each input dim + extra features
        lowerCamelCase_ =input_size * len(self.lags_sequence ) + self._number_of_features
        lowerCamelCase_ =d_model
        lowerCamelCase_ =encoder_attention_heads
        lowerCamelCase_ =decoder_attention_heads
        lowerCamelCase_ =encoder_ffn_dim
        lowerCamelCase_ =decoder_ffn_dim
        lowerCamelCase_ =encoder_layers
        lowerCamelCase_ =decoder_layers
        lowerCamelCase_ =dropout
        lowerCamelCase_ =attention_dropout
        lowerCamelCase_ =activation_dropout
        lowerCamelCase_ =encoder_layerdrop
        lowerCamelCase_ =decoder_layerdrop
        lowerCamelCase_ =activation_function
        lowerCamelCase_ =init_std
        lowerCamelCase_ =use_cache

        # --- Informer-specific ---
        lowerCamelCase_ =attention_type
        lowerCamelCase_ =sampling_factor
        lowerCamelCase_ =distil
        super().__init__(is_encoder_decoder=lowerCAmelCase, **lowerCAmelCase )

    @property
    def lowercase__ ( self ):
        """Number of extra per-timestep input features fed to the model."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
676
1
'''Two ways to evaluate a polynomial: naive powers and Horner's scheme.'''
from collections.abc import Sequence

# Bug fix: both functions were named ``a_`` (the second shadowing the first)
# while __main__ called undefined ``evaluate_poly`` / ``horner`` and read
# undefined ``poly`` / ``x`` — a NameError at runtime. Real names restored.


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial at ``x``.

    ``poly`` holds the coefficients in increasing order of degree, i.e.
    poly[i] is the coefficient of x**i.

    >>> evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0)
    79800.0
    """
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method.

    Avoids computing explicit powers: O(n) multiplications instead of
    O(n) exponentiations.

    >>> horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0)
    79800.0
    """
    result = 0.0
    # Fold from the highest-degree coefficient down: r = r*x + c.
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
676
'''Project Euler problem 50: longest sum of consecutive primes below a limit
that is itself prime.'''
from __future__ import annotations

# Bug fix: both functions were named ``a_`` and the second called undefined
# ``prime_sieve`` and read undefined ``primes`` / ``length`` / ``largest`` /
# ``ceiling`` / ``sol`` — a NameError at runtime. Real names restored; the
# __main__ block already called ``solution()``.


def prime_sieve(limit: int) -> list[int]:
    """Return all primes strictly below ``limit`` via a sieve of Eratosthenes
    over odd numbers only.  Assumes limit > 2.
    """
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # Cross off odd multiples of each odd i (even numbers are never read).
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1000000) -> int:
    """Return the prime below ``ceiling`` that is the sum of the most
    consecutive primes.

    >>> solution(1000)
    953
    """
    primes = prime_sieve(ceiling)
    # Perf: O(1) membership tests instead of scanning the primes list.
    prime_set = set(primes)
    length = 0
    largest = 0
    for i in range(len(primes)):
        # Start at i + length: shorter runs cannot beat the best found so far.
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"""{solution() = }""")
676
1
'''Generate the n-bit Gray code sequence (successive values differ in one bit).'''

# Bug fix: both functions were named ``a_``; the first called undefined
# ``gray_code_sequence_string`` and read undefined ``sequence`` — a NameError
# at runtime. Real names restored.


def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for ``bit_count`` bits as integers.

    >>> gray_code(2)
    [0, 1, 3, 2]

    Raises:
        ValueError: if ``bit_count`` is negative.
    """
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Return the Gray code sequence for ``bit_count`` bits as bit strings.

    Recursive construction: prefix '0' to the (n-1)-bit sequence, then
    prefix '1' to the same sequence reversed.
    """
    # Base cases achieved when either n = 0 or n = 1.
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # 1 << n is equivalent to 2**n entries
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
'''Unconditional image-generation pipeline using DDIM sampling.'''
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


# NOTE(review): identifiers appear machine-mangled (assignments to
# ``lowerCamelCase_`` read back as ``image`` / ``image_shape`` etc.);
# verify against the upstream diffusers DDIMPipeline before running.
class __UpperCamelCase ( lowerCamelCase__ ):
    def __init__( self, lowerCAmelCase, lowerCAmelCase ):
        """Store the UNet and scheduler, coercing the scheduler to DDIM."""
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        lowerCamelCase_ =DDIMScheduler.from_config(scheduler.config )

        self.register_modules(unet=lowerCAmelCase, scheduler=lowerCAmelCase )

    @torch.no_grad()
    def __call__( self, lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = 0.0, lowerCAmelCase = 50, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, ):
        """Sample images from pure noise with the DDIM reverse process.

        Args (positionally): batch_size, generator, eta, num_inference_steps,
        use_clipped_model_output, output_type, return_dict.
        """
        # Sample gaussian noise to begin loop; sample_size may be an int
        # (square images) or an explicit (H, W) tuple.
        if isinstance(self.unet.config.sample_size, lowerCAmelCase ):
            lowerCamelCase_ =(
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            lowerCamelCase_ =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        # A list of generators must match the requested batch size one-to-one.
        if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(lowerCAmelCase )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.'''
            )

        lowerCamelCase_ =randn_tensor(lowerCAmelCase, generator=lowerCAmelCase, device=self.device, dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(lowerCAmelCase )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            lowerCamelCase_ =self.unet(lowerCAmelCase, lowerCAmelCase ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            lowerCamelCase_ =self.scheduler.step(
                lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, eta=lowerCAmelCase, use_clipped_model_output=lowerCAmelCase, generator=lowerCAmelCase
            ).prev_sample

        # Map from [-1, 1] to [0, 1] and to channel-last numpy layout.
        lowerCamelCase_ =(image / 2 + 0.5).clamp(0, 1 )
        lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            lowerCamelCase_ =self.numpy_to_pil(lowerCAmelCase )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=lowerCAmelCase )
676
1
'''Inverted DDIM scheduler (noising direction x_0 -> x_T) for diffusers.'''
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate


# NOTE(review): identifiers appear machine-mangled (values assigned to
# ``lowerCamelCase_`` but read back under real names such as ``betas`` /
# ``alpha_prod_t``); verify against the upstream diffusers scheduler.
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class __UpperCamelCase ( lowerCamelCase__ ):
    # prev_sample: the next (noisier) latent; pred_original_sample: x_0 estimate.
    lowercase : torch.FloatTensor
    lowercase : Optional[torch.FloatTensor] =None


def a_ ( __snake_case : Tuple , __snake_case : Union[str, Any]=0.9_9_9 , __snake_case : List[Any]="cosine" , ) -> Union[str, Any]:
    """Build a beta schedule from an alpha-bar function.

    Args (positionally): number of diffusion timesteps, max beta clamp,
    alpha transform type ("cosine" or "exp").  Returns a float32 tensor of
    betas derived from successive ratios of alpha_bar.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(__snake_case : int ):
            return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(__snake_case : Union[str, Any] ):
            return math.exp(t * -1_2.0 )

    else:
        raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )

    lowerCamelCase_ =[]
    for i in range(__snake_case ):
        lowerCamelCase_ =i / num_diffusion_timesteps
        lowerCamelCase_ =(i + 1) / num_diffusion_timesteps
        # beta_i = 1 - alpha_bar(t2)/alpha_bar(t1), clamped at max_beta
        betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ) , __snake_case ) )
    return torch.tensor(__snake_case , dtype=torch.floataa )


class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    # Scheduler API compatibility order.
    lowercase : str =1

    @register_to_config
    def __init__( self, lowerCAmelCase = 1_000, lowerCAmelCase = 0.0_0_0_1, lowerCAmelCase = 0.0_2, lowerCAmelCase = "linear", lowerCAmelCase = None, lowerCAmelCase = True, lowerCAmelCase = True, lowerCAmelCase = 0, lowerCAmelCase = "epsilon", lowerCAmelCase = 1.0, **lowerCAmelCase, ):
        """Configure beta schedule and derived alpha products.

        Args (positionally): num_train_timesteps, beta_start, beta_end,
        beta_schedule, trained_betas, clip_sample, set_alpha_to_one,
        steps_offset, prediction_type, clip_sample_range.
        """
        # Back-compat shim for the renamed ``set_alpha_to_one`` kwarg.
        if kwargs.get('''set_alpha_to_one''', lowerCAmelCase ) is not None:
            lowerCamelCase_ =(
                '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
            )
            deprecate('''set_alpha_to_one''', '''1.0.0''', lowerCAmelCase, standard_warn=lowerCAmelCase )
            lowerCamelCase_ =kwargs['''set_alpha_to_one''']
        if trained_betas is not None:
            lowerCamelCase_ =torch.tensor(lowerCAmelCase, dtype=torch.floataa )
        elif beta_schedule == "linear":
            lowerCamelCase_ =torch.linspace(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            lowerCamelCase_ =(
                torch.linspace(beta_start**0.5, beta_end**0.5, lowerCAmelCase, dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            lowerCamelCase_ =betas_for_alpha_bar(lowerCAmelCase )
        else:
            raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )

        lowerCamelCase_ =1.0 - self.betas
        lowerCamelCase_ =torch.cumprod(self.alphas, dim=0 )

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        lowerCamelCase_ =torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        lowerCamelCase_ =1.0

        # setable values
        lowerCamelCase_ =None
        lowerCamelCase_ =torch.from_numpy(np.arange(0, lowerCAmelCase ).copy().astype(np.intaa ) )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """Identity scaling — DDIM needs no per-timestep input scaling."""
        return sample

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """Set the discrete inference timesteps (and optionally the device)."""
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
                f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
                f''' maximal {self.config.num_train_timesteps} timesteps.'''
            )

        lowerCamelCase_ =num_inference_steps
        lowerCamelCase_ =self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        lowerCamelCase_ =(np.arange(0, lowerCAmelCase ) * step_ratio).round().copy().astype(np.intaa )
        lowerCamelCase_ =torch.from_numpy(lowerCAmelCase ).to(lowerCAmelCase )
        self.timesteps += self.config.steps_offset

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 0.0, lowerCAmelCase = False, lowerCAmelCase = None, lowerCAmelCase = True, ):
        """One inverted-DDIM step: advance sample from t to t + step.

        Args (positionally): model_output, timestep, sample, eta,
        use_clipped_model_output, variance_noise, return_dict.
        Returns a DDIMSchedulerOutput (or tuple) with the next sample and
        the predicted x_0.
        """
        # 1. get previous step value (=t+1) — inversion walks forward in time
        lowerCamelCase_ =timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        lowerCamelCase_ =self.alphas_cumprod[timestep]
        lowerCamelCase_ =(
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        lowerCamelCase_ =1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            lowerCamelCase_ =(sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            lowerCamelCase_ =model_output
        elif self.config.prediction_type == "sample":
            lowerCamelCase_ =model_output
            lowerCamelCase_ =(sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            lowerCamelCase_ =(alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            lowerCamelCase_ =(alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
                ''' `v_prediction`''' )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            lowerCamelCase_ =pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        lowerCamelCase_ =(1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        lowerCamelCase_ =alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=lowerCAmelCase, pred_original_sample=lowerCAmelCase )

    def __len__( self ):
        """Number of training timesteps the scheduler was configured with."""
        return self.config.num_train_timesteps
676
'''Twin prime lookup: the twin of a prime p is p + 2 when both are prime.'''
from maths.prime_check import is_prime


def a_(__snake_case: int) -> int:
    """Return the twin prime of the given number, or -1.

    Two primes are twins when they differ by exactly 2; for input ``p`` this
    returns ``p + 2`` when both ``p`` and ``p + 2`` are prime, else -1.

    Raises:
        TypeError: if the input is not an integer.
    """
    # Bug fix: the original called isinstance(value, value) — TypeError even
    # for valid ints — and referenced an undefined name ``number``.
    if not isinstance(__snake_case, int):
        msg = f"Input value of [number={__snake_case}] must be an integer"
        raise TypeError(msg)
    number = __snake_case
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
'''Tests for datasets' DownloadManager: download, extract, archive iteration.'''
import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename


# Mock URL, the content served for it, and the sha256 cache filename of the URL.
a_ : Optional[Any] = """http://www.mocksite.com/file1.txt"""
a_ : List[str] = """\"text\": [\"foo\", \"foo\"]"""
a_ : Optional[Any] = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""


# NOTE(review): identifiers in this file look machine-mangled — values are
# assigned to ``lowerCamelCase_`` but read back under real names
# (``url`` / ``dl_manager`` / ``parts`` …); verify against the upstream test.
class __UpperCamelCase :
    # Minimal stand-in for a requests.Response object.
    lowercase : int =2_00
    lowercase : Union[str, Any] ={'Content-Length': '100'}
    lowercase : Optional[int] ={}

    def lowercase__ ( self, **lowerCAmelCase ):
        """Yield the mock body as a single utf-8 chunk (iter_content stand-in)."""
        return [bytes(lowerCAmelCase, '''utf-8''' )]


def a_ ( *__snake_case : Optional[int] , **__snake_case : List[Any] ) -> List[Any]:
    """Replacement for requests.request that always returns the mock response."""
    return MockResponse()


@pytest.mark.parametrize('''urls_type''' , [str, list, dict] )
def a_ ( __snake_case : Any , __snake_case : int , __snake_case : List[str] ) -> Union[str, Any]:
    """download() should cache files under <cache>/downloads/<hash> with metadata.

    Parametrized over the three accepted url container shapes.
    """
    import requests

    # Route all HTTP traffic through the mock above.
    monkeypatch.setattr(__snake_case , '''request''' , __snake_case )

    lowerCamelCase_ =URL
    if issubclass(__snake_case , __snake_case ):
        lowerCamelCase_ =url
    elif issubclass(__snake_case , __snake_case ):
        lowerCamelCase_ =[url]
    elif issubclass(__snake_case , __snake_case ):
        lowerCamelCase_ ={'''train''': url}
    lowerCamelCase_ ='''dummy'''
    lowerCamelCase_ ='''downloads'''
    lowerCamelCase_ =tmp_path
    lowerCamelCase_ =DownloadConfig(
        cache_dir=os.path.join(__snake_case , __snake_case ) , use_etag=__snake_case , )
    lowerCamelCase_ =DownloadManager(dataset_name=__snake_case , download_config=__snake_case )
    lowerCamelCase_ =dl_manager.download(__snake_case )
    lowerCamelCase_ =urls
    # Normalize single / list / dict results to parallel lists for checking.
    for downloaded_paths in [downloaded_paths]:
        if isinstance(__snake_case , __snake_case ):
            lowerCamelCase_ =[downloaded_paths]
            lowerCamelCase_ =[urls]
        elif isinstance(__snake_case , __snake_case ):
            assert "train" in downloaded_paths.keys()
            lowerCamelCase_ =downloaded_paths.values()
            lowerCamelCase_ =urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(__snake_case , __snake_case ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            lowerCamelCase_ =Path(__snake_case )
            lowerCamelCase_ =downloaded_path.parts
            # cache layout: .../<cache_subdir>/<url hash>
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            lowerCamelCase_ =downloaded_path.read_text()
            assert content == CONTENT
            # a sidecar .json records the url and etag
            lowerCamelCase_ =downloaded_path.with_suffix('''.json''' )
            assert metadata_downloaded_path.exists()
            lowerCamelCase_ =json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}


@pytest.mark.parametrize('''paths_type''' , [str, list, dict] )
def a_ ( __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Optional[int] ) -> Any:
    """extract() should unpack archives under an 'extracted' cache subdir.

    Parametrized over the three accepted path container shapes.
    """
    lowerCamelCase_ =str(__snake_case )
    if issubclass(__snake_case , __snake_case ):
        lowerCamelCase_ =filename
    elif issubclass(__snake_case , __snake_case ):
        lowerCamelCase_ =[filename]
    elif issubclass(__snake_case , __snake_case ):
        lowerCamelCase_ ={'''train''': filename}
    lowerCamelCase_ ='''dummy'''
    lowerCamelCase_ =xz_file.parent
    lowerCamelCase_ ='''extracted'''
    lowerCamelCase_ =DownloadConfig(
        cache_dir=__snake_case , use_etag=__snake_case , )
    lowerCamelCase_ =DownloadManager(dataset_name=__snake_case , download_config=__snake_case )
    lowerCamelCase_ =dl_manager.extract(__snake_case )
    lowerCamelCase_ =paths
    # Normalize single / list / dict results to parallel lists for checking.
    for extracted_paths in [extracted_paths]:
        if isinstance(__snake_case , __snake_case ):
            lowerCamelCase_ =[extracted_paths]
            lowerCamelCase_ =[paths]
        elif isinstance(__snake_case , __snake_case ):
            assert "train" in extracted_paths.keys()
            lowerCamelCase_ =extracted_paths.values()
            lowerCamelCase_ =paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(__snake_case , __snake_case ):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            lowerCamelCase_ =Path(__snake_case )
            lowerCamelCase_ =extracted_path.parts
            # cache layout: .../extracted/<hash of archive path>
            assert parts[-1] == hash_url_to_filename(__snake_case , etag=__snake_case )
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            lowerCamelCase_ =extracted_path.read_text()
            lowerCamelCase_ =text_file.read_text()
            assert extracted_file_content == expected_file_content


def a_ ( __snake_case : Dict , __snake_case : Union[str, Any] ) -> str:
    """Helper: assert a jsonl stream holds 4 records with the expected keys."""
    assert path.endswith('''.jsonl''' )
    for num_items, line in enumerate(__snake_case , start=1 ):
        lowerCamelCase_ =json.loads(line.decode('''utf-8''' ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4


@pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def a_ ( __snake_case : Tuple , __snake_case : Optional[int] ) -> List[Any]:
    """iter_archive() should yield every member of a flat tar/zip archive."""
    lowerCamelCase_ =request.getfixturevalue(__snake_case )
    lowerCamelCase_ =DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__snake_case ) , start=1 ):
        _test_jsonl(__snake_case , __snake_case )
    assert num_jsonl == 2


@pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def a_ ( __snake_case : Dict , __snake_case : Tuple ) -> Optional[Any]:
    """iter_archive() should also iterate archives nested inside archives."""
    lowerCamelCase_ =request.getfixturevalue(__snake_case )
    lowerCamelCase_ =DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__snake_case ) , start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__snake_case ) , start=1 ):
            _test_jsonl(__snake_case , __snake_case )
    assert num_tar == 1
    assert num_jsonl == 2


def a_ ( __snake_case : Dict ) -> Dict:
    """iter_files() should walk a directory and yield each file exactly once."""
    lowerCamelCase_ =DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(__snake_case ) , start=1 ):
        assert os.path.basename(__snake_case ) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
676
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class __UpperCamelCase ( lowerCamelCase__ ):
    """Output container for a predictor step.

    Holds the stepped (noised) sample and the noise-free mean of that sample.
    NOTE(review): field names are obfuscated; the step method below constructs
    this as `SdeVeOutput(prev_sample=..., prev_sample_mean=...)`.
    """

    # prev_sample: sample after one reverse-SDE predictor step (mean + diffusion * noise)
    lowercase : torch.FloatTensor
    # prev_sample_mean: the same step before the noise term is added
    lowercase : torch.FloatTensor


class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    """Variance-exploding (VE) score-SDE scheduler with predictor and corrector steps."""

    # Scheduler API version marker (read by the pipeline machinery).
    lowercase : Tuple =1

    @register_to_config
    def __init__( self, lowerCAmelCase = 2_000, lowerCAmelCase = 0.1_5, lowerCAmelCase = 0.0_1, lowerCAmelCase = 1_3_4_8.0, lowerCAmelCase = 1e-5, lowerCAmelCase = 1, ):
        """Configure the scheduler.

        Defaults (in order): num_train_timesteps=2000, snr=0.15, sigma_min=0.01,
        sigma_max=1348.0, sampling_eps=1e-5, correct_steps=1.
        """
        # standard deviation of the initial noise distribution
        lowerCamelCase_ =sigma_max

        # setable values
        lowerCamelCase_ =None

        self.set_sigmas(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """Identity: this scheduler does not rescale model inputs."""
        return sample

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None ):
        """Set the continuous timestep grid, linearly spaced from 1 down to sampling_eps."""
        lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps

        lowerCamelCase_ =torch.linspace(1, lowerCAmelCase, lowerCAmelCase, device=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None ):
        """Build the geometric sigma schedule sigma_min * (sigma_max/sigma_min)**t.

        Also sets the timestep grid first if it has not been set yet.
        """
        lowerCamelCase_ =sigma_min if sigma_min is not None else self.config.sigma_min
        lowerCamelCase_ =sigma_max if sigma_max is not None else self.config.sigma_max
        lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(lowerCAmelCase, lowerCAmelCase )

        lowerCamelCase_ =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        # log-linear (geometric) spacing of the discrete sigmas between sigma_min and sigma_max
        lowerCamelCase_ =torch.exp(torch.linspace(math.log(lowerCAmelCase ), math.log(lowerCAmelCase ), lowerCAmelCase ) )
        lowerCamelCase_ =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """Return sigma at the previous discrete index, or zeros at index 0."""
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device ) ),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device ),
        )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ):
        """Predictor step: one reverse-SDE update of the sample.

        Raises ValueError if set_timesteps was never called. Returns an
        SdeVeOutput (or a (prev_sample, prev_sample_mean) tuple when
        return_dict is falsy).
        """
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )

        # Broadcast the scalar timestep over the batch dimension.
        lowerCamelCase_ =timestep * torch.ones(
            sample.shape[0], device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        # Map continuous t in [0, 1] to a discrete sigma index.
        lowerCamelCase_ =(timestep * (len(self.timesteps ) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        lowerCamelCase_ =timesteps.to(self.discrete_sigmas.device )

        lowerCamelCase_ =self.discrete_sigmas[timesteps].to(sample.device )
        lowerCamelCase_ =self.get_adjacent_sigma(lowerCAmelCase, lowerCAmelCase ).to(sample.device )
        lowerCamelCase_ =torch.zeros_like(lowerCAmelCase )
        lowerCamelCase_ =(sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        lowerCamelCase_ =diffusion.flatten()
        # Expand diffusion to the sample's rank so it broadcasts elementwise.
        while len(diffusion.shape ) < len(sample.shape ):
            lowerCamelCase_ =diffusion.unsqueeze(-1 )
        lowerCamelCase_ =drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        lowerCamelCase_ =randn_tensor(
            sample.shape, layout=sample.layout, generator=lowerCAmelCase, device=sample.device, dtype=sample.dtype )
        lowerCamelCase_ =sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        lowerCamelCase_ =prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=lowerCAmelCase, prev_sample_mean=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ):
        """Corrector step: Langevin-style correction using a step size derived from the SNR."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        lowerCamelCase_ =randn_tensor(sample.shape, layout=sample.layout, generator=lowerCAmelCase ).to(sample.device )

        # compute step size from the model_output, the noise, and the snr
        lowerCamelCase_ =torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
        lowerCamelCase_ =torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
        lowerCamelCase_ =(self.config.snr * noise_norm / grad_norm) ** 2 * 2
        lowerCamelCase_ =step_size * torch.ones(sample.shape[0] ).to(sample.device )  # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        lowerCamelCase_ =step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            lowerCamelCase_ =step_size.unsqueeze(-1 )
        lowerCamelCase_ =sample + step_size * model_output
        lowerCamelCase_ =prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Forward process: add sigma-scaled noise to original_samples for training."""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        lowerCamelCase_ =timesteps.to(original_samples.device )
        lowerCamelCase_ =self.discrete_sigmas.to(original_samples.device )[timesteps]
        # Use the provided noise if given, otherwise draw fresh Gaussian noise.
        lowerCamelCase_ =(
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
        )
        lowerCamelCase_ =noise + original_samples
        return noisy_samples

    def __len__( self ):
        """Number of training timesteps this scheduler was configured with."""
        return self.config.num_train_timesteps
676
1
'''simple docstring'''
# Lazy-import module for CLIP: submodules are only imported on first attribute
# access (via _LazyModule) unless the corresponding optional backend is present.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Base import structure: configuration, processor and slow tokenizer are always available.
a_ : int = {
    """configuration_clip""": [
        """CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """CLIPConfig""",
        """CLIPOnnxConfig""",
        """CLIPTextConfig""",
        """CLIPVisionConfig""",
    ],
    """processing_clip""": ["""CLIPProcessor"""],
    """tokenization_clip""": ["""CLIPTokenizer"""],
}

# Fast tokenizer: only registered when the `tokenizers` backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : int = ["""CLIPTokenizerFast"""]

# Image processing: only registered when the vision backend (PIL) is installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : Dict = ["""CLIPFeatureExtractor"""]
    a_ : Union[str, Any] = ["""CLIPImageProcessor"""]

# PyTorch models: only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : Tuple = [
        """CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """CLIPModel""",
        """CLIPPreTrainedModel""",
        """CLIPTextModel""",
        """CLIPTextModelWithProjection""",
        """CLIPVisionModel""",
        """CLIPVisionModelWithProjection""",
    ]

# TensorFlow models: only registered when tensorflow is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : Any = [
        """TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFCLIPModel""",
        """TFCLIPPreTrainedModel""",
        """TFCLIPTextModel""",
        """TFCLIPVisionModel""",
    ]

# Flax models: only registered when flax/jax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : List[Any] = [
        """FlaxCLIPModel""",
        """FlaxCLIPPreTrainedModel""",
        """FlaxCLIPTextModel""",
        """FlaxCLIPTextPreTrainedModel""",
        """FlaxCLIPVisionModel""",
        """FlaxCLIPVisionPreTrainedModel""",
    ]


if TYPE_CHECKING:
    # Real (eager) imports for static type checkers only; mirrors the lazy structure above.
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
676
'''simple docstring'''


def a_ ( number : int , iterations : int ) -> str:
    """Play FizzBuzz from ``number`` up to and including ``iterations``.

    Multiples of 3 become "Fizz", multiples of 5 "Buzz", multiples of both
    "FizzBuzz"; other numbers are kept as-is. Each token is followed by a
    single space (the returned string therefore ends with a trailing space).

    :param number: starting number, an int >= 1
    :param iterations: last number to play (inclusive), an int >= 1
    :raises ValueError: if either argument is not an int, or is below 1
    """
    # Fix vs. original: parameters were unusable placeholders; also the
    # error message read "must be and integer" (typo).
    if not isinstance(iterations , int ):
        raise ValueError('''iterations must be defined as integers''' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError(
            '''starting number must be an integer and be more than 0''' )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )

    out = ''''''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            # Neither a multiple of 3 nor of 5: keep the number itself.
            out += str(number )

        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
'''simple docstring'''
# Fine-tune a transformers model for token classification (NER/POS/chunking)
# on CoNLL-2003-formatted data.
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


# NOTE(review): obfuscated name — the body below uses `logger`, which this
# assignment presumably bound originally.
a_ : Dict = logging.getLogger(__name__)


@dataclass
class __UpperCamelCase :
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    # model_name_or_path (required)
    lowercase : str =field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    # config_name
    lowercase : Optional[str] =field(
        default=lowerCamelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    # task_type: name of a TokenClassificationTask subclass in the local `tasks` module
    lowercase : Optional[str] =field(
        default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
    # tokenizer_name
    lowercase : Optional[str] =field(
        default=lowerCamelCase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    # use_fast
    lowercase : bool =field(default=lowerCamelCase__ , metadata={'help': 'Set this flag to use fast tokenization.'} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    # cache_dir
    lowercase : Optional[str] =field(
        default=lowerCamelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )


@dataclass
class __UpperCamelCase :
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    # data_dir (required)
    lowercase : str =field(
        metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
    # labels: optional path to a file listing all labels
    lowercase : Optional[str] =field(
        default=lowerCamelCase__ , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
    # max_seq_length
    lowercase : int =field(
        default=1_28 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    # overwrite_cache
    lowercase : bool =field(
        default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )


def a_ ( ) -> int:
    """Parse arguments, build the model/datasets, then train/evaluate/predict as requested."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    lowerCamelCase_ =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =parser.parse_args_into_dataclasses()

    # Refuse to clobber a non-empty output directory unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            ''' --overwrite_output_dir to overcome.''' )

    # Resolve the task class (e.g. NER) from the local `tasks` module by name.
    lowerCamelCase_ =import_module('''tasks''' )
    try:
        lowerCamelCase_ =getattr(__snake_case , model_args.task_type )
        lowerCamelCase_ =token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
            F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,
        datefmt='''%m/%d/%Y %H:%M:%S''' ,
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,
    )
    logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,
        training_args.local_rank ,
        training_args.device ,
        training_args.n_gpu ,
        bool(training_args.local_rank != -1 ) ,
        training_args.fpaa ,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , __snake_case )

    # Set seed
    set_seed(training_args.seed )

    # Prepare CONLL-2003 task
    lowerCamelCase_ =token_classification_task.get_labels(data_args.labels )
    lowerCamelCase_ =dict(enumerate(__snake_case ) )
    lowerCamelCase_ =len(__snake_case )

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowerCamelCase_ =AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,
        num_labels=__snake_case ,
        idalabel=__snake_case ,
        labelaid={label: i for i, label in enumerate(__snake_case )} ,
        cache_dir=model_args.cache_dir ,
    )
    lowerCamelCase_ =AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
        use_fast=model_args.use_fast ,
    )
    lowerCamelCase_ =AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path ,
        from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,
        config=__snake_case ,
        cache_dir=model_args.cache_dir ,
    )

    # Get datasets (only built for the phases that were requested)
    lowerCamelCase_ =(
        TokenClassificationDataset(
            token_classification_task=__snake_case ,
            data_dir=data_args.data_dir ,
            tokenizer=__snake_case ,
            labels=__snake_case ,
            model_type=config.model_type ,
            max_seq_length=data_args.max_seq_length ,
            overwrite_cache=data_args.overwrite_cache ,
            mode=Split.train ,
        )
        if training_args.do_train
        else None
    )
    lowerCamelCase_ =(
        TokenClassificationDataset(
            token_classification_task=__snake_case ,
            data_dir=data_args.data_dir ,
            tokenizer=__snake_case ,
            labels=__snake_case ,
            model_type=config.model_type ,
            max_seq_length=data_args.max_seq_length ,
            overwrite_cache=data_args.overwrite_cache ,
            mode=Split.dev ,
        )
        if training_args.do_eval
        else None
    )

    def align_predictions(__snake_case : np.ndarray , __snake_case : np.ndarray ) -> Tuple[List[int], List[int]]:
        # Convert logits to per-token label strings, dropping positions whose
        # gold label equals the CrossEntropyLoss ignore_index (padding/subwords).
        lowerCamelCase_ =np.argmax(__snake_case , axis=2 )
        lowerCamelCase_, lowerCamelCase_ =preds.shape
        lowerCamelCase_ =[[] for _ in range(__snake_case )]
        lowerCamelCase_ =[[] for _ in range(__snake_case )]
        for i in range(__snake_case ):
            for j in range(__snake_case ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list

    def compute_metrics(__snake_case : EvalPrediction ) -> Dict:
        # seqeval entity-level metrics over the aligned label sequences.
        lowerCamelCase_, lowerCamelCase_ =align_predictions(p.predictions , p.label_ids )
        return {
            "accuracy_score": accuracy_score(__snake_case , __snake_case ),
            "precision": precision_score(__snake_case , __snake_case ),
            "recall": recall_score(__snake_case , __snake_case ),
            "f1": fa_score(__snake_case , __snake_case ),
        }

    # Data collator (pad to multiples of 8 only under mixed precision, for tensor cores)
    lowerCamelCase_ =DataCollatorWithPadding(__snake_case , pad_to_multiple_of=8 ) if training_args.fpaa else None

    # Initialize our Trainer
    lowerCamelCase_ =Trainer(
        model=__snake_case ,
        args=__snake_case ,
        train_dataset=__snake_case ,
        eval_dataset=__snake_case ,
        compute_metrics=__snake_case ,
        data_collator=__snake_case ,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    lowerCamelCase_ ={}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )

        lowerCamelCase_ =trainer.evaluate()

        lowerCamelCase_ =os.path.join(training_args.output_dir , '''eval_results.txt''' )
        if trainer.is_world_process_zero():
            with open(__snake_case , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key, value in result.items():
                    logger.info(''' %s = %s''' , __snake_case , __snake_case )
                    writer.write('''%s = %s\n''' % (key, value) )

            results.update(__snake_case )

    # Predict
    if training_args.do_predict:
        lowerCamelCase_ =TokenClassificationDataset(
            token_classification_task=__snake_case ,
            data_dir=data_args.data_dir ,
            tokenizer=__snake_case ,
            labels=__snake_case ,
            model_type=config.model_type ,
            max_seq_length=data_args.max_seq_length ,
            overwrite_cache=data_args.overwrite_cache ,
            mode=Split.test ,
        )

        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =trainer.predict(__snake_case )
        lowerCamelCase_, lowerCamelCase_ =align_predictions(__snake_case , __snake_case )

        lowerCamelCase_ =os.path.join(training_args.output_dir , '''test_results.txt''' )
        if trainer.is_world_process_zero():
            with open(__snake_case , '''w''' ) as writer:
                for key, value in metrics.items():
                    logger.info(''' %s = %s''' , __snake_case , __snake_case )
                    writer.write('''%s = %s\n''' % (key, value) )

        # Save predictions
        lowerCamelCase_ =os.path.join(training_args.output_dir , '''test_predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(__snake_case , '''w''' ) as writer:
                with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
                    token_classification_task.write_predictions_to_file(__snake_case , __snake_case , __snake_case )

    return results


def a_ ( __snake_case : Dict ) -> Tuple:
    """Entry point for xla_spawn (TPUs); the index argument is unused."""
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
676
'''simple docstring''' from typing import List import numpy as np def a_ ( __snake_case : dict ) -> int: """simple docstring""" lowerCamelCase_ ={key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) lowerCamelCase_ =max(lists_lengths.values() , default=0 ) return max(1 , __snake_case ) def a_ ( __snake_case : int , __snake_case : int ) -> List[range]: """simple docstring""" lowerCamelCase_ =[] for group_idx in range(__snake_case ): lowerCamelCase_ =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowerCamelCase_ =shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowerCamelCase_ =range(__snake_case , start + num_shards_to_add ) shards_indices_per_group.append(__snake_case ) return shards_indices_per_group def a_ ( __snake_case : dict , __snake_case : int ) -> List[dict]: """simple docstring""" lowerCamelCase_ =_number_of_shards_in_gen_kwargs(__snake_case ) if num_shards == 1: return [dict(__snake_case )] else: lowerCamelCase_ =_distribute_shards(num_shards=__snake_case , max_num_jobs=__snake_case ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__snake_case , __snake_case ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__snake_case ) ) ] def a_ ( __snake_case : List[dict] ) -> dict: """simple docstring""" 
return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __snake_case ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def a_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> dict: """simple docstring""" lowerCamelCase_ ={len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )} lowerCamelCase_ ={} for size in list_sizes: lowerCamelCase_ =list(range(__snake_case ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowerCamelCase_ =dict(__snake_case ) for key, value in shuffled_kwargs.items(): if isinstance(__snake_case , __snake_case ): lowerCamelCase_ =[value[i] for i in indices_per_size[len(__snake_case )]] return shuffled_kwargs
676
1
'''simple docstring'''
# TensorFlow-specific benchmark arguments: wraps the shared BenchmarkArguments
# with TF device/TPU/XLA setup.
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_tf_available():
    import tensorflow as tf


a_ : Union[str, Any] = logging.get_logger(__name__)


@dataclass
class __UpperCamelCase ( lowerCamelCase__ ):
    # Legacy `no_*` flags: still accepted in kwargs but rewritten (negated) to
    # their positive counterparts in __init__, with a deprecation warning.
    lowercase : List[Any] =[
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]

    def __init__( self, **lowerCAmelCase ):
        """Translate deprecated `no_*` kwargs to their positive form, then delegate to the base class."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                # Strip the "no_" prefix and invert the boolean value.
                lowerCamelCase_ =deprecated_arg[3:]
                lowerCamelCase_ =not kwargs.pop(lowerCAmelCase )
                logger.warning(
                    f'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
                    f''' {positive_arg}={kwargs[positive_arg]}''' )

        lowerCamelCase_ =kwargs.pop('''tpu_name''', self.tpu_name )
        lowerCamelCase_ =kwargs.pop('''device_idx''', self.device_idx )
        lowerCamelCase_ =kwargs.pop('''eager_mode''', self.eager_mode )
        lowerCamelCase_ =kwargs.pop('''use_xla''', self.use_xla )
        super().__init__(**lowerCAmelCase )

    # tpu_name
    lowercase : str =field(
        default=lowerCamelCase__ , metadata={'help': 'Name of TPU'} , )
    # device_idx
    lowercase : int =field(
        default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
    # eager_mode
    lowercase : bool =field(default=lowerCamelCase__ , metadata={'help': 'Benchmark models in eager model.'} )
    # use_xla
    lowercase : bool =field(
        default=lowerCamelCase__ , metadata={
            'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
        } , )

    @cached_property
    def lowercase__ ( self ):
        """Resolve a TPU cluster (by name if given), or None when no TPU is reachable."""
        requires_backends(self, ['''tf'''] )
        lowerCamelCase_ =None
        if self.tpu:
            try:
                if self.tpu_name:
                    lowerCamelCase_ =tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    lowerCamelCase_ =tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                # No TPU available under this name / in this environment.
                lowerCamelCase_ =None
        return tpu

    @cached_property
    def lowercase__ ( self ):
        """Build the tf.distribute strategy: TPUStrategy, or OneDeviceStrategy on one GPU/CPU."""
        requires_backends(self, ['''tf'''] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )

            lowerCamelCase_ =tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], '''GPU''' )
                lowerCamelCase_ =tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([], '''GPU''' )  # disable GPU
                lowerCamelCase_ =tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' )

        return strategy

    @property
    def lowercase__ ( self ):
        """Whether a TPU cluster was successfully resolved."""
        requires_backends(self, ['''tf'''] )
        return self._setup_tpu is not None

    @property
    def lowercase__ ( self ):
        """The (cached) distribution strategy built above."""
        requires_backends(self, ['''tf'''] )
        return self._setup_strategy

    @property
    def lowercase__ ( self ):
        """List of physical GPU devices visible to TensorFlow."""
        requires_backends(self, ['''tf'''] )
        return tf.config.list_physical_devices('''GPU''' )

    @property
    def lowercase__ ( self ):
        """Number of usable GPUs (0 when CUDA is disabled)."""
        requires_backends(self, ['''tf'''] )
        if self.cuda:
            return len(self.gpu_list )
        return 0

    @property
    def lowercase__ ( self ):
        """True when at least one GPU is usable."""
        return self.n_gpu > 0
676
'''simple docstring'''
# Preprocess a text corpus once (tokenization + token-to-id) and pickle the
# result, so distillation training does not redo it on every run.
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer


logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
# NOTE(review): obfuscated name — the body below uses `logger`, which this
# assignment presumably bound originally.
a_ : int = logging.getLogger(__name__)


def a_ ( ) -> Union[str, Any]:
    """Tokenize each line of --file_path with the chosen tokenizer and pickle the id arrays."""
    lowerCamelCase_ =argparse.ArgumentParser(
        description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
    parser.add_argument('''--file_path''' , type=__snake_case , default='''data/dump.txt''' , help='''The path to the data.''' )
    parser.add_argument('''--tokenizer_type''' , type=__snake_case , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
    parser.add_argument('''--tokenizer_name''' , type=__snake_case , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
    parser.add_argument('''--dump_file''' , type=__snake_case , default='''data/dump''' , help='''The dump file prefix.''' )
    lowerCamelCase_ =parser.parse_args()

    logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' )
    # Pick the tokenizer class and the sentence boundary tokens for each family.
    if args.tokenizer_type == "bert":
        lowerCamelCase_ =BertTokenizer.from_pretrained(args.tokenizer_name )
        lowerCamelCase_ =tokenizer.special_tokens_map['''cls_token''']  # `[CLS]`
        lowerCamelCase_ =tokenizer.special_tokens_map['''sep_token''']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        lowerCamelCase_ =RobertaTokenizer.from_pretrained(args.tokenizer_name )
        lowerCamelCase_ =tokenizer.special_tokens_map['''cls_token''']  # `<s>`
        lowerCamelCase_ =tokenizer.special_tokens_map['''sep_token''']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        lowerCamelCase_ =GPTaTokenizer.from_pretrained(args.tokenizer_name )
        lowerCamelCase_ =tokenizer.special_tokens_map['''bos_token''']  # `<|endoftext|>`
        lowerCamelCase_ =tokenizer.special_tokens_map['''eos_token''']  # `<|endoftext|>`

    logger.info(F'''Loading text from {args.file_path}''' )
    with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
        lowerCamelCase_ =fp.readlines()

    logger.info('''Start encoding''' )
    logger.info(F'''{len(__snake_case )} examples to process.''' )

    lowerCamelCase_ =[]
    lowerCamelCase_ =0
    # Progress-log interval (every 10000 lines).
    lowerCamelCase_ =1_0000
    lowerCamelCase_ =time.time()
    for text in data:
        # Wrap every line with the boundary tokens by hand, so encoding is done
        # with add_special_tokens disabled.
        lowerCamelCase_ =F'''{bos} {text.strip()} {sep}'''
        lowerCamelCase_ =tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
        rslt.append(__snake_case )

        iter += 1
        if iter % interval == 0:
            lowerCamelCase_ =time.time()
            logger.info(F'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
            lowerCamelCase_ =time.time()
    logger.info('''Finished binarization''' )
    logger.info(F'''{len(__snake_case )} examples processed.''' )

    lowerCamelCase_ =F'''{args.dump_file}.{args.tokenizer_name}.pickle'''
    lowerCamelCase_ =tokenizer.vocab_size
    # Use the smallest integer dtype that can hold every token id.
    if vocab_size < (1 << 16):
        lowerCamelCase_ =[np.uintaa(__snake_case ) for d in rslt]
    else:
        lowerCamelCase_ =[np.intaa(__snake_case ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F'''Dump to {dp_file}''' )
    with open(__snake_case , '''wb''' ) as handle:
        pickle.dump(rslt_ , __snake_case , protocol=pickle.HIGHEST_PROTOCOL )


if __name__ == "__main__":
    main()
676
1
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __UpperCamelCase : def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=10, lowerCAmelCase=3, lowerCAmelCase=2, lowerCAmelCase=2, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=37, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=10, lowerCAmelCase=0.0_2, lowerCAmelCase="divided_space_time", lowerCAmelCase=None, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =image_size lowerCamelCase_ =num_channels lowerCamelCase_ =patch_size lowerCamelCase_ =num_frames lowerCamelCase_ =is_training lowerCamelCase_ =use_labels lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =intermediate_size lowerCamelCase_ =hidden_act lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ =attention_probs_dropout_prob lowerCamelCase_ =attention_type lowerCamelCase_ =initializer_range lowerCamelCase_ =scope lowerCamelCase_ =num_labels # in 
TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token lowerCamelCase_ =(image_size // patch_size) ** 2 lowerCamelCase_ =(num_frames) * self.num_patches_per_frame + 1 def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ =None if self.use_labels: lowerCamelCase_ =ids_tensor([self.batch_size], self.num_labels ) lowerCamelCase_ =self.get_config() return config, pixel_values, labels def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TimesformerConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type, ) lowerCamelCase_ =self.num_labels return config def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =TimesformerModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =TimesformerForVideoClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase ) # verify the logits shape lowerCamelCase_ =torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ 
=self.prepare_config_and_inputs() lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =config_and_inputs lowerCamelCase_ ={'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : Dict =(TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowercase : Tuple =( {'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification} if is_torch_available() else {} ) lowercase : Optional[Any] =False lowercase : Union[str, Any] =False lowercase : Optional[int] =False lowercase : List[Any] =False def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TimesformerModelTester(self ) lowerCamelCase_ =ConfigTester( self, config_class=lowerCAmelCase, has_text_modality=lowerCAmelCase, hidden_size=37 ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ): """simple docstring""" lowerCamelCase_ =copy.deepcopy(lowerCAmelCase ) if return_labels: if model_class in get_values(lowerCAmelCase ): lowerCamelCase_ =torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase ) return inputs_dict def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ =model_class(lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings(), (nn.Module) ) lowerCamelCase_ =model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase, nn.Linear ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class 
in self.all_model_classes: lowerCamelCase_ =model_class(lowerCAmelCase ) lowerCamelCase_ =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ =[*signature.parameters.keys()] lowerCamelCase_ =['''pixel_values'''] self.assertListEqual(arg_names[:1], lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =TimesformerModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" if not self.has_attentions: pass else: lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ =True for model_class in self.all_model_classes: lowerCamelCase_ =self.model_tester.seq_length lowerCamelCase_ =self.model_tester.num_frames lowerCamelCase_ =True lowerCamelCase_ =False lowerCamelCase_ =True lowerCamelCase_ =model_class(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() with torch.no_grad(): lowerCamelCase_ =model(**self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) ) lowerCamelCase_ =outputs.attentions self.assertEqual(len(lowerCAmelCase ), self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCamelCase_ =True lowerCamelCase_ =model_class(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() with torch.no_grad(): lowerCamelCase_ =model(**self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) ) lowerCamelCase_ =outputs.attentions 
self.assertEqual(len(lowerCAmelCase ), self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], ) lowerCamelCase_ =len(lowerCAmelCase ) # Check attention is always last and order is fine lowerCamelCase_ =True lowerCamelCase_ =True lowerCamelCase_ =model_class(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() with torch.no_grad(): lowerCamelCase_ =model(**self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) ) self.assertEqual(out_len + 1, len(lowerCAmelCase ) ) lowerCamelCase_ =outputs.attentions self.assertEqual(len(lowerCAmelCase ), self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], ) def lowercase__ ( self ): """simple docstring""" def check_hidden_states_output(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =model_class(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() with torch.no_grad(): lowerCamelCase_ =model(**self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) ) lowerCamelCase_ =outputs.hidden_states lowerCamelCase_ =self.model_tester.num_hidden_layers + 1 self.assertEqual(len(lowerCAmelCase ), lowerCAmelCase ) lowerCamelCase_ =self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], ) lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ =True check_hidden_states_output(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) # check that 
output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ =True check_hidden_states_output(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) def a_ ( ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ =hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' ) lowerCamelCase_ =np.load(__snake_case ) return list(__snake_case ) @require_torch @require_vision class __UpperCamelCase ( unittest.TestCase ): @cached_property def lowercase__ ( self ): """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( lowerCAmelCase ) lowerCamelCase_ =self.default_image_processor lowerCamelCase_ =prepare_video() lowerCamelCase_ =image_processor(video[:8], return_tensors='''pt''' ).to(lowerCAmelCase ) # forward pass with torch.no_grad(): lowerCamelCase_ =model(**lowerCAmelCase ) # verify the logits lowerCamelCase_ =torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape, lowerCAmelCase ) lowerCamelCase_ =torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCAmelCase, atol=1e-4 ) )
676
"""Configuration for the MVP encoder-decoder model."""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of an MVP model.

    Mirrors the BART-style encoder/decoder layout plus MVP's prompt options
    (`use_prompt`, `prompt_length`, `prompt_mid_dim`).
    """

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_267,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Legacy configs used `force_bos_token_to_be_generated`; translate it once so
        # re-saved configs carry the modern `forced_bos_token_id` field instead.
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
676
1
"""Tests for accelerate's kwargs handlers (GradScalerKwargs, DistributedDataParallelKwargs)."""
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    # Field names a/b/c are asserted by test_kwargs_handler below.
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # `to_kwargs` only returns the fields that differ from their defaults.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1_024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2_000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-runs this very file under torchrun; the __main__ block below does the checks.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
676
"""SentencePiece tokenizer for BertGeneration checkpoints."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    """Tokenizer backed by a SentencePiece model file (`spiece.model`)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and rebuild in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Split `text` into SentencePiece sub-tokens."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Map a token (str) to its id using the SentencePiece vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Map an id (int) back to its token using the SentencePiece vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
676
1
"""Tests for MinHash-based near-duplicate detection (minhash_deduplication)."""
from unittest import TestCase

from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset():
    """Build a tiny dataset: two near-duplicate files ('a '*20 vs 'a '*30) and one distinct file."""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        # The two 'a'-files should land in one cluster of size 2.
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_dedup, duplicate_clusters = deduplicate_dataset(ds)
        # One of the two duplicates is dropped, leaving 2 of 3 rows.
        self.assertEqual(len(ds_dedup), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], False)
676
"""Polynomial evaluation: naive power sum vs Horner's method."""
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate `poly` at `x` the naive way.

    `poly` holds coefficients in ascending-power order: poly[i] is the
    coefficient of x**i. Runs O(n) multiplies per term via `x**i`.
    """
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate `poly` at `x` with Horner's method.

    Same coefficient convention as `evaluate_poly`, but only one multiply
    and one add per coefficient (no exponentiation).
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
676
1
"""Image-to-text pipeline: caption an image, optionally conditioned on a text prompt."""
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Pipeline that predicts a caption for a given image (vision-to-sequence models)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        """Split call-time kwargs into preprocess / forward parameter dicts."""
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        """Caption the image(s) passed as `images` (path, URL, or PIL image)."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `input_ids = None` in `preprocess` (when `prompt=None`). In batch model, the pipeline will
        # group them into a list of `None`, which fail `generate`. Normalize that back to a single None here.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
676
"""Processor pairing a CLIP image processor with an XLM-Roberta tokenizer."""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    """Wraps a `CLIPImageProcessor` and an `XLMRobertaTokenizer(Fast)` into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # `feature_extractor` is only a deprecated alias for `image_processor`.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; returns a `BatchEncoding`."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # De-duplicate while preserving order (tokenizer names first).
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
676
1
"""Fast (tokenizers-backed) tokenizer for Salesforce CodeGen models."""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import numpy as np

from ...utils import is_tf_available, is_torch_available, logging


if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2_048,
}


class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """Byte-level BPE tokenizer for CodeGen, with `truncate_before_pattern` support in `decode`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        # Re-create the backend pre-tokenizer if its `add_prefix_space` disagrees with ours.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids,
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        """Decode ids to text, optionally truncating at the first regex in `truncate_before_pattern`."""
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        """Cut `completion` before a second top-level `print`/`def`, then before the earliest terminal regex."""

        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
676
"""Image-to-text pipeline: caption an image, optionally conditioned on a text prompt."""
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """Pipeline that predicts a caption for a given image (vision-to-sequence models)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        """Split call-time kwargs into preprocess / forward parameter dicts."""
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        """Caption the image(s) passed as `images` (path, URL, or PIL image)."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # A batch of `None` input_ids (git model without a prompt) must collapse back to a single None
        # before being handed to `generate`.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
676
1
"""ByT5 subpackage init: lazily exposes ``ByT5Tokenizer``."""
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Submodule -> public-names mapping consumed by _LazyModule below.
# (Previously this dict was bound to another name while `_import_structure`
# was referenced, which raised NameError on import.)
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    # Import the real symbol only for type checkers; the module name must match
    # the key in `_import_structure`.
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the tokenizer module is only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
"""Convert a TensorFlow BERT checkpoint to a PyTorch ``BertForPreTraining`` state dict."""
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Build a ``BertForPreTraining`` from ``bert_config_file``, load the TF
    checkpoint weights into it, and save the state dict to ``pytorch_dump_path``.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: path to the model's JSON config file.
        pytorch_dump_path: destination path for the PyTorch state dict.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


# Backward-compatible alias for the previous (obfuscated) function name.
a_ = convert_tf_checkpoint_to_pytorch

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
676
1
"""Tests for ConditionalDetrImageProcessor: resize/pad size handling for PIL,
numpy and torch inputs, plus slow integration tests against COCO fixtures."""
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ConditionalDetrImageProcessor


class ConditionalDetrImageProcessingTester(unittest.TestCase):
    """Holds the image-processor kwargs under test and computes the output
    (height, width) the processor is expected to produce."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # Default resizing: shortest side to 18, longest side capped at 1333.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Mirror the processor's shortest-edge resizing to predict output size.

        For a batch, the expectation is the max height / max width over the
        per-image expectations (the processor pads to the batch maximum).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                width, height = image.size
            else:
                height, width = image.shape[1], image.shape[2]
            if width < height:
                expected_height = int(self.size["shortest_edge"] * height / width)
                expected_width = self.size["shortest_edge"]
            elif width > height:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * width / height)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)

        # Legacy `max_size` / `pad_and_return_pixel_mask` kwargs must still be honoured.
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        """Integration test against the COCO detection fixture; pinned values
        come from running the reference processor."""
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39_769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        """Integration test against the COCO panoptic fixture; pinned values
        come from running the reference processor."""
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
676
"""AltCLIP configuration classes: text encoder, vision encoder, and the
composite model configuration."""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the AltCLIP text encoder (XLM-R-style transformer
    with a projection head of size `project_dim`)."""

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the AltCLIP vision encoder (CLIP-ViT style)."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the vision sub-config, unwrapping it from a full AltCLIP config
        when necessary."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    """Composite AltCLIP configuration holding a text and a vision sub-config."""

    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialise, replacing the sub-config objects with their dict forms."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
676
1
"""N-queens solver using depth-first search with diagonal-collision sets."""
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Recursively place one queen per row, appending complete boards to `boards`.

    Args:
        possible_board: column index of the queen in each already-filled row.
        diagonal_right_collisions: occupied `row - col` diagonals.
        diagonal_left_collisions: occupied `row + col` diagonals.
        boards: accumulator for rendered solutions (mutated in place).
        n: board size.
    """
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """Solve the n-queens problem for an `n` x `n` board and print every solution."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
676
'''simple docstring''' import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=2, lowerCAmelCase=99, lowerCAmelCase=0, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase="last", lowerCAmelCase=None, lowerCAmelCase=None, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =seq_length lowerCamelCase_ =is_training lowerCamelCase_ =use_input_lengths lowerCamelCase_ =use_token_type_ids lowerCamelCase_ =use_labels lowerCamelCase_ =gelu_activation lowerCamelCase_ =sinusoidal_embeddings lowerCamelCase_ =causal lowerCamelCase_ =asm lowerCamelCase_ =n_langs lowerCamelCase_ =vocab_size lowerCamelCase_ =n_special lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_ 
=attention_probs_dropout_prob lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =type_vocab_size lowerCamelCase_ =type_sequence_label_size lowerCamelCase_ =initializer_range lowerCamelCase_ =num_labels lowerCamelCase_ =num_choices lowerCamelCase_ =summary_type lowerCamelCase_ =use_proj lowerCamelCase_ =scope def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ =None if self.use_input_lengths: lowerCamelCase_ =( ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCamelCase_ =None if self.use_token_type_ids: lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.n_langs ) lowerCamelCase_ =None lowerCamelCase_ =None lowerCamelCase_ =None if self.use_labels: lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size ) lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels ) lowerCamelCase_ =ids_tensor([self.batch_size], 2 ).float() lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices ) lowerCamelCase_ =self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowercase__ ( self ): """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, ) def lowercase__ ( self, 
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, lengths=lowerCAmelCase, langs=lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, langs=lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertWithLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForQuestionAnswering(lowerCAmelCase ) model.to(lowerCAmelCase ) 
model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model( lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, p_mask=lowerCAmelCase, ) lowerCamelCase_ =model( lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, ) ((lowerCamelCase_), ) =result_with_labels.to_tuple() lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase ) ((lowerCamelCase_), ) =result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, () ) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =self.num_labels lowerCamelCase_ 
=FlaubertForTokenClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =self.num_choices lowerCamelCase_ =FlaubertForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =model( lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ) =config_and_inputs lowerCamelCase_ ={ '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : List[Any] =( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) lowercase : Tuple =( { 'feature-extraction': 
FlaubertModel, 'fill-mask': FlaubertWithLMHeadModel, 'question-answering': FlaubertForQuestionAnsweringSimple, 'text-classification': FlaubertForSequenceClassification, 'token-classification': FlaubertForTokenClassification, 'zero-shot': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ): """simple docstring""" lowerCamelCase_ =super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowerCamelCase_ =torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase ) lowerCamelCase_ =torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase ) return inputs_dict def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =FlaubertModelTester(self ) lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, emb_dim=37 ) def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase ) def lowercase__ ( self 
): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =FlaubertModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @slow @require_torch_gpu def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return lowerCamelCase_ =True lowerCamelCase_ =model_class(config=lowerCAmelCase ) lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =torch.jit.trace( lowerCAmelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, '''traced_model.pt''' ) ) lowerCamelCase_ =torch.jit.load(os.path.join(lowerCAmelCase, '''traced_model.pt''' ), map_location=lowerCAmelCase ) loaded(inputs_dict['''input_ids'''].to(lowerCAmelCase ), inputs_dict['''attention_mask'''].to(lowerCAmelCase ) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' ) lowerCamelCase_ =torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase )[0] lowerCamelCase_ =torch.Size((1, 11, 768) ) self.assertEqual(output.shape, lowerCAmelCase ) lowerCamelCase_ =torch.tensor( [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=1e-4 ) )
676
1
'''ImageGPT model configuration and ONNX export configuration.'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

a_ : Optional[Any] = logging.get_logger(__name__)

# Checkpoint name -> config URL map (URLs left empty upstream).
# NOTE(review): this rebinds the module name `a_` that held the logger above;
# harmless here because the logger is not referenced later in this file.
a_ : List[str] = {
    """openai/imagegpt-small""": """""",
    """openai/imagegpt-medium""": """""",
    """openai/imagegpt-large""": """""",
}


class __UpperCamelCase ( lowerCamelCase__ ):
    """Configuration for an ImageGPT model (a GPT-2-style decoder over pixel tokens).

    Fixes over the previous revision:
    - The constructor's parameters were all named identically (a SyntaxError);
      the real names were recovered from the right-hand sides of the body
      assignments (``vocab_size``, ``n_positions``, ...).
    - Arguments were bound to a throwaway local instead of instance
      attributes, so the config object stored nothing; they are now set on
      ``self`` as ``PretrainedConfig`` subclasses require.
    - The three class attributes all shadowed one another under the single
      name ``lowercase``; restored to the conventional ``PretrainedConfig``
      names (``model_type``, ``keys_to_ignore_at_inference``,
      ``attribute_map``).
    """

    model_type = 'imagegpt'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    # Backward-compat alias: the old (shadowed) name resolved to this dict.
    lowercase = attribute_map

    def __init__(
        self,
        vocab_size=512 + 1,  # 512 color clusters + 1 start-of-sequence token
        n_positions=32 * 32,  # flattened 32x32 pixel grid
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,  # None -> feed-forward size chosen by the model (4 * n_embd convention)
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        """Store the hyper-parameters and forward the rest to the base config.

        All arguments are plain hyper-parameters; ``**kwargs`` is passed
        through to the ``PretrainedConfig`` base constructor.
        """
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class __UpperCamelCase ( lowerCamelCase__ ):
    """ONNX export configuration for ImageGPT.

    NOTE(review): in the previous revision the ``inputs`` property and
    ``generate_dummy_inputs`` shared one name, so the second definition
    silently destroyed the property. Restored to the ``OnnxConfig`` contract
    names; a compat alias for the old public name is kept below.
    """

    @property
    def inputs(self) -> Mapping[str, Any]:
        """Axis layout of the model inputs expected by the ONNX exporter."""
        return OrderedDict(
            [
                ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Build dummy inputs for ONNX tracing.

        Generates ``batch_size`` random images and runs them through the
        given preprocessor; ``seq_length`` and ``is_pair`` are part of the
        base-class signature and unused for image inputs.
        """
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs

    # Backward-compat alias for the previous (mangled) public name.
    lowercase__ = generate_dummy_inputs
676
'''Utilities to convert model weights between PyTorch state dicts and Flax
parameter trees.

NOTE(review): this file's identifiers were mechanically mangled and the code
cannot run as-is — do NOT trust it without restoring names first:
- every function is named ``a_`` (each definition shadows the previous one),
  yet the call sites reference the intended names
  (``convert_pytorch_state_dict_to_flax``, ``rename_key_and_reshape_tensor``,
  ``load_flax_weights_in_pytorch_model``, ...), which are never defined;
- every parameter of every function is named ``__snake_case`` — duplicate
  parameter names are a SyntaxError in Python;
- results are repeatedly bound to the single throwaway local
  ``lowerCamelCase_`` while later statements read the intended names
  (``pt_path``, ``pt_state_dict``, ``flax_state_dict``, ``flax_key``, ...),
  which would raise NameError;
- ``a_`` at module level holds the logger, but the functions reference
  ``logger``.
Only comments/docstrings were changed in this revision; the code tokens are
kept exactly as found so the mangling remains visible to the next maintainer.
'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple

import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict

import transformers

from .utils import logging


a_ : List[Any] = logging.get_logger(__name__)


def a_ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : int=False ) -> List[str]:
    """Load a PyTorch checkpoint (single file or sharded) and convert it to a
    Flax state dict.

    Presumably the arguments are (flax_model, pytorch_checkpoint_path,
    is_sharded, allow_missing_keys) — TODO confirm once the mangled names are
    restored.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.'''
        )
        raise

    if not is_sharded:
        # Single-file checkpoint: load it fully on CPU, then convert.
        lowerCamelCase_ =os.path.abspath(__snake_case )
        logger.info(F'''Loading PyTorch weights from {pt_path}''' )
        lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' )
        logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
        lowerCamelCase_ =convert_pytorch_state_dict_to_flax(__snake_case , __snake_case )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        lowerCamelCase_ =convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case )
    return flax_state_dict


def a_ ( __snake_case : Tuple[str] , __snake_case : np.ndarray , __snake_case : Dict[str, jnp.ndarray] , __snake_case : str , ) -> (Tuple[str], np.ndarray):
    """Rename one PyTorch parameter key to its Flax equivalent and reshape the
    tensor when the layouts differ (conv kernels are transposed HWIO-style,
    linear kernels are transposed).

    Presumably the arguments are (pt_tuple_key, pt_tensor,
    random_flax_state_dict, model_prefix) — TODO confirm. Each candidate
    rename below is checked against the target Flax state dict (with or
    without the model prefix) before being accepted.
    """

    def is_key_or_prefix_key_in_dict(__snake_case : Tuple[str] ) -> bool:
        # True when the candidate key exists in the Flax params, either bare
        # or under the base-model prefix.
        return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0

    # layer norm
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''scale''',)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer mean
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''mean''',)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ):
        return renamed_pt_tuple_key, pt_tensor

    # batch norm layer var
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''var''',)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ):
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''embedding''',)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ):
        return renamed_pt_tuple_key, pt_tensor

    # conv layer: PyTorch OIHW -> Flax HWIO kernel layout
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ):
        lowerCamelCase_ =pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor

    # linear layer: transpose (out, in) -> (in, out)
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ):
        lowerCamelCase_ =pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    lowerCamelCase_ =pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    lowerCamelCase_ =None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        lowerCamelCase_ =pt_tuple_key[-2] + '''_g'''
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        lowerCamelCase_ =pt_tuple_key[-2] + '''_v'''
    if name is not None:
        lowerCamelCase_ =pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    # No rule matched: keep the key and tensor unchanged.
    return pt_tuple_key, pt_tensor


def a_ ( __snake_case : Union[str, Any] , __snake_case : str ) -> str:
    """Convert a full (unsharded) PyTorch state dict to a nested Flax params
    dict, renaming/reshaping each weight and handling base-model prefixes and
    batch-norm statistics.

    Presumably the arguments are (pt_state_dict, flax_model) — TODO confirm.
    """
    # convert pytorch tensor to numpy
    lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}

    lowerCamelCase_ =flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        lowerCamelCase_ =flax_model.params['''params''']
    else:
        lowerCamelCase_ =flax_model.params
    lowerCamelCase_ =flatten_dict(__snake_case )

    # add batch_stats keys,values to dict
    if "batch_stats" in flax_model.params:
        lowerCamelCase_ =flatten_dict(flax_model.params['''batch_stats'''] )
        random_flax_state_dict.update(__snake_case )

    lowerCamelCase_ ={}

    # Detect whether we are loading a head-ful checkpoint into a base model,
    # or a base checkpoint into a head-ful model (prefix handling differs).
    lowerCamelCase_ =(model_prefix not in flax_model_params) and (
        model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
    )
    lowerCamelCase_ =(model_prefix in flax_model_params) and (
        model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        lowerCamelCase_ =tuple(pt_key.split('''.''' ) )

        # remove base model prefix if necessary
        lowerCamelCase_ =pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            lowerCamelCase_ =pt_tuple_key[1:]

        # Correctly rename weight parameters
        lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor(
            __snake_case , __snake_case , __snake_case , __snake_case )

        # add model prefix if necessary
        lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            lowerCamelCase_ =(model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                lowerCamelCase_ =jnp.asarray(__snake_case )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(__snake_case , __snake_case )
                continue

            # also add unexpected weight so that warning is thrown
            lowerCamelCase_ =jnp.asarray(__snake_case )
        else:
            # also add unexpected weight so that warning is thrown
            lowerCamelCase_ =jnp.asarray(__snake_case )

    return unflatten_dict(__snake_case )


def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Optional[Any]:
    """Convert a *sharded* PyTorch checkpoint (a list of .pt shard files) to a
    nested Flax params dict; same per-key logic as the unsharded variant,
    applied shard by shard.

    Presumably the arguments are (shard_filenames, flax_model) — TODO confirm.
    """
    import torch

    # Load the index
    lowerCamelCase_ ={}
    for shard_file in shard_filenames:
        # load using msgpack utils
        lowerCamelCase_ =torch.load(__snake_case )
        lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()}

        lowerCamelCase_ =flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
        if "batch_stats" in flax_model.params:
            lowerCamelCase_ =flax_model.params['''params''']

            lowerCamelCase_ =flatten_dict(__snake_case )
            random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
        else:
            lowerCamelCase_ =flax_model.params
            lowerCamelCase_ =flatten_dict(__snake_case )

        lowerCamelCase_ =(model_prefix not in flax_model_params) and (
            model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
        )
        lowerCamelCase_ =(model_prefix in flax_model_params) and (
            model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameters name to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            lowerCamelCase_ =tuple(pt_key.split('''.''' ) )

            # remove base model prefix if necessary
            lowerCamelCase_ =pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                lowerCamelCase_ =pt_tuple_key[1:]

            # Correctly rename weight parameters
            lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor(
                __snake_case , __snake_case , __snake_case , __snake_case )
            # add model prefix if necessary
            lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                lowerCamelCase_ =(model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                        F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )

                # add batch stats if the model contains batchnorm layers
                if "batch_stats" in flax_model.params:
                    if "mean" in flax_key[-1]:
                        lowerCamelCase_ =jnp.asarray(__snake_case )
                        continue
                    if "var" in flax_key[-1]:
                        lowerCamelCase_ =jnp.asarray(__snake_case )
                        continue
                    # remove num_batches_tracked key
                    if "num_batches_tracked" in flax_key[-1]:
                        flax_state_dict.pop(__snake_case , __snake_case )
                        continue

                    # also add unexpected weight so that warning is thrown
                    lowerCamelCase_ =jnp.asarray(__snake_case )
                else:
                    # also add unexpected weight so that warning is thrown
                    lowerCamelCase_ =jnp.asarray(__snake_case )
    return unflatten_dict(__snake_case )


def a_ ( __snake_case : List[str] , __snake_case : Dict ) -> str:
    """Load a serialized Flax checkpoint file into a PyTorch model.

    Presumably the arguments are (model, flax_checkpoint_path) — TODO confirm.
    Looks up the matching ``Flax<ModelName>`` class on the ``transformers``
    module, deserializes the msgpack checkpoint, then delegates to
    ``load_flax_weights_in_pytorch_model``.
    """
    lowerCamelCase_ =os.path.abspath(__snake_case )
    logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' )

    # import correct flax class
    lowerCamelCase_ =getattr(__snake_case , '''Flax''' + model.__class__.__name__ )

    # load flax weight dict
    with open(__snake_case , '''rb''' ) as state_f:
        try:
            lowerCamelCase_ =from_bytes(__snake_case , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )

    return load_flax_weights_in_pytorch_model(__snake_case , __snake_case )


def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[int]:
    """Copy weights from a Flax params tree into a PyTorch model in place,
    renaming keys to PyTorch conventions, casting bf16 to fp32, and warning
    about unexpected/missing keys.

    Presumably the arguments are (pt_model, flax_state) — TODO confirm.
    Returns the PyTorch model with the loaded state dict.
    """
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
            ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
            ''' instructions.'''
        )
        raise

    # check if we have bf16 weights
    lowerCamelCase_ =flatten_dict(jax.tree_util.tree_map(lambda __snake_case : x.dtype == jnp.bfloataa , __snake_case ) ).values()
    if any(__snake_case ):
        # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
            '''before loading those in PyTorch model.''' )
        lowerCamelCase_ =jax.tree_util.tree_map(
            lambda __snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __snake_case )

    lowerCamelCase_ =flatten_dict(__snake_case )
    lowerCamelCase_ =pt_model.state_dict()

    # Detect head/base mismatch between checkpoint and model (prefix handling).
    lowerCamelCase_ =(pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
    )
    lowerCamelCase_ =(pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    lowerCamelCase_ =[]
    lowerCamelCase_ =set(pt_model_dict.keys() )

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        lowerCamelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix
        lowerCamelCase_ ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            lowerCamelCase_ =flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            lowerCamelCase_ =(pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict:
            # conv layer: Flax HWIO -> PyTorch OIHW
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
            lowerCamelCase_ =jnp.transpose(__snake_case , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict:
            # linear layer
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
            lowerCamelCase_ =flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',)
        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_mean''',)
        elif "var" in flax_key_tuple[-1]:
            lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_var''',)

        if "batch_stats" in flax_state:
            lowerCamelCase_ ='''.'''.join(flax_key_tuple[1:] )  # Remove the params/batch_stats header
        else:
            lowerCamelCase_ ='''.'''.join(__snake_case )

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        lowerCamelCase_ ={}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            lowerCamelCase_ =key.split('''.''' )
            lowerCamelCase_ =None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                lowerCamelCase_ =key_components[-2] + '''_g'''
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                lowerCamelCase_ =key_components[-2] + '''_v'''
            if name is not None:
                lowerCamelCase_ =key_components[:-3] + [name]
                lowerCamelCase_ ='''.'''.join(__snake_case )
                lowerCamelCase_ =key

        if flax_key in special_pt_names:
            lowerCamelCase_ =special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
                    F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
            else:
                # add weight to pytorch dict
                lowerCamelCase_ =np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor
                lowerCamelCase_ =torch.from_numpy(__snake_case )
            # remove from missing keys
            missing_keys.remove(__snake_case )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(__snake_case )

    pt_model.load_state_dict(__snake_case )

    # re-transform missing_keys to list
    lowerCamelCase_ =list(__snake_case )

    if len(__snake_case ) > 0:
        logger.warning(
            '''Some weights of the Flax model were not used when initializing the PyTorch model'''
            F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
            F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
            ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
            F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
            ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
            ''' FlaxBertForSequenceClassification model).''' )
    else:
        logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )

    if len(__snake_case ) > 0:
        logger.warning(
            F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
            F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
            ''' use it for predictions and inference.''' )
    else:
        logger.warning(
            F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
            '''If your task is similar to the task the model of the checkpoint was trained on, '''
            F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )

    return pt_model
676
1
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig a_ : int = logging.get_logger(__name__) # General docstring a_ : List[Any] = """RegNetConfig""" # Base docstring a_ : Dict = """facebook/regnet-y-040""" a_ : str = [1, 10_88, 7, 7] # Image classification docstring a_ : int = """facebook/regnet-y-040""" a_ : Any = """tabby, tabby cat""" a_ : Optional[Any] = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class __UpperCamelCase ( nn.Module ): def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 3, lowerCAmelCase = 1, lowerCAmelCase = 1, lowerCAmelCase = "relu", ): """simple docstring""" super().__init__() lowerCamelCase_ =nn.Convad( lowerCAmelCase, lowerCAmelCase, kernel_size=lowerCAmelCase, stride=lowerCAmelCase, padding=kernel_size // 2, groups=lowerCAmelCase, bias=lowerCAmelCase, ) lowerCamelCase_ =nn.BatchNormad(lowerCAmelCase ) lowerCamelCase_ =ACTaFN[activation] if activation is not None else nn.Identity() def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.convolution(lowerCAmelCase ) lowerCamelCase_ =self.normalization(lowerCAmelCase ) lowerCamelCase_ =self.activation(lowerCAmelCase ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self, lowerCAmelCase ): """simple docstring""" super().__init__() lowerCamelCase_ =RegNetConvLayer( config.num_channels, config.embedding_size, 
kernel_size=3, stride=2, activation=config.hidden_act ) lowerCamelCase_ =config.num_channels def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) lowerCamelCase_ =self.embedder(lowerCAmelCase ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 2 ): """simple docstring""" super().__init__() lowerCamelCase_ =nn.Convad(lowerCAmelCase, lowerCAmelCase, kernel_size=1, stride=lowerCAmelCase, bias=lowerCAmelCase ) lowerCamelCase_ =nn.BatchNormad(lowerCAmelCase ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.convolution(lowerCAmelCase ) lowerCamelCase_ =self.normalization(lowerCAmelCase ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" super().__init__() lowerCamelCase_ =nn.AdaptiveAvgPoolad((1, 1) ) lowerCamelCase_ =nn.Sequential( nn.Convad(lowerCAmelCase, lowerCAmelCase, kernel_size=1 ), nn.ReLU(), nn.Convad(lowerCAmelCase, lowerCAmelCase, kernel_size=1 ), nn.Sigmoid(), ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.pooler(lowerCAmelCase ) lowerCamelCase_ =self.attention(lowerCAmelCase ) lowerCamelCase_ =hidden_state * attention return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 1 ): """simple docstring""" super().__init__() lowerCamelCase_ =in_channels != out_channels or stride != 1 lowerCamelCase_ =max(1, out_channels // config.groups_width ) lowerCamelCase_ =( RegNetShortCut(lowerCAmelCase, lowerCAmelCase, stride=lowerCAmelCase ) if should_apply_shortcut else nn.Identity() ) lowerCamelCase_ =nn.Sequential( 
RegNetConvLayer(lowerCAmelCase, lowerCAmelCase, kernel_size=1, activation=config.hidden_act ), RegNetConvLayer(lowerCAmelCase, lowerCAmelCase, stride=lowerCAmelCase, groups=lowerCAmelCase, activation=config.hidden_act ), RegNetConvLayer(lowerCAmelCase, lowerCAmelCase, kernel_size=1, activation=lowerCAmelCase ), ) lowerCamelCase_ =ACTaFN[config.hidden_act] def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =hidden_state lowerCamelCase_ =self.layer(lowerCAmelCase ) lowerCamelCase_ =self.shortcut(lowerCAmelCase ) hidden_state += residual lowerCamelCase_ =self.activation(lowerCAmelCase ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 1 ): """simple docstring""" super().__init__() lowerCamelCase_ =in_channels != out_channels or stride != 1 lowerCamelCase_ =max(1, out_channels // config.groups_width ) lowerCamelCase_ =( RegNetShortCut(lowerCAmelCase, lowerCAmelCase, stride=lowerCAmelCase ) if should_apply_shortcut else nn.Identity() ) lowerCamelCase_ =nn.Sequential( RegNetConvLayer(lowerCAmelCase, lowerCAmelCase, kernel_size=1, activation=config.hidden_act ), RegNetConvLayer(lowerCAmelCase, lowerCAmelCase, stride=lowerCAmelCase, groups=lowerCAmelCase, activation=config.hidden_act ), RegNetSELayer(lowerCAmelCase, reduced_channels=int(round(in_channels / 4 ) ) ), RegNetConvLayer(lowerCAmelCase, lowerCAmelCase, kernel_size=1, activation=lowerCAmelCase ), ) lowerCamelCase_ =ACTaFN[config.hidden_act] def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =hidden_state lowerCamelCase_ =self.layer(lowerCAmelCase ) lowerCamelCase_ =self.shortcut(lowerCAmelCase ) hidden_state += residual lowerCamelCase_ =self.activation(lowerCAmelCase ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 2, lowerCAmelCase = 2, ): """simple docstring""" 
super().__init__() lowerCamelCase_ =RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer lowerCamelCase_ =nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, stride=lowerCAmelCase, ), *[layer(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) for _ in range(depth - 1 )], ) def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.layers(lowerCAmelCase ) return hidden_state class __UpperCamelCase ( nn.Module ): def __init__( self, lowerCAmelCase ): """simple docstring""" super().__init__() lowerCamelCase_ =nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( lowerCAmelCase, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ) ) lowerCamelCase_ =zip(config.hidden_sizes, config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowerCAmelCase, config.depths[1:] ): self.stages.append(RegNetStage(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, depth=lowerCAmelCase ) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = False, lowerCAmelCase = True ): """simple docstring""" lowerCamelCase_ =() if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowerCamelCase_ =hidden_states + (hidden_state,) lowerCamelCase_ =stage_module(lowerCAmelCase ) if output_hidden_states: lowerCamelCase_ =hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase, hidden_states=lowerCAmelCase ) class __UpperCamelCase ( lowerCamelCase__ ): lowercase : List[str] =RegNetConfig lowercase : Tuple ='regnet' lowercase : int ='pixel_values' lowercase : int =True def lowercase__ ( self, lowerCAmelCase ): """simple 
docstring""" if isinstance(lowerCAmelCase, nn.Convad ): nn.init.kaiming_normal_(module.weight, mode='''fan_out''', nonlinearity='''relu''' ) elif isinstance(lowerCAmelCase, (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight, 1 ) nn.init.constant_(module.bias, 0 ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False ): """simple docstring""" if isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =value a_ : Tuple = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ a_ : Union[str, Any] = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' 
, lowerCamelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase ): """simple docstring""" super().__init__(lowerCAmelCase ) lowerCamelCase_ =config lowerCamelCase_ =RegNetEmbeddings(lowerCAmelCase ) lowerCamelCase_ =RegNetEncoder(lowerCAmelCase ) lowerCamelCase_ =nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=lowerCAmelCase, config_class=_CONFIG_FOR_DOC, modality='''vision''', expected_output=_EXPECTED_OUTPUT_SHAPE, ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None ): """simple docstring""" lowerCamelCase_ =( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCamelCase_ =return_dict if return_dict is not None else self.config.use_return_dict lowerCamelCase_ =self.embedder(lowerCAmelCase ) lowerCamelCase_ =self.encoder( lowerCAmelCase, output_hidden_states=lowerCAmelCase, return_dict=lowerCAmelCase ) lowerCamelCase_ =encoder_outputs[0] lowerCamelCase_ =self.pooler(lowerCAmelCase ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCAmelCase, pooler_output=lowerCAmelCase, hidden_states=encoder_outputs.hidden_states, ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , lowerCamelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase ): """simple docstring""" super().__init__(lowerCAmelCase ) lowerCamelCase_ =config.num_labels lowerCamelCase_ =RegNetModel(lowerCAmelCase ) # classification head lowerCamelCase_ =nn.Sequential( nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels ) if config.num_labels > 0 else nn.Identity(), ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=lowerCAmelCase, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, ) def lowercase__ ( self, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, ): """simple docstring""" lowerCamelCase_ =return_dict if return_dict is not None else self.config.use_return_dict lowerCamelCase_ =self.regnet(lowerCAmelCase, output_hidden_states=lowerCAmelCase, return_dict=lowerCAmelCase ) lowerCamelCase_ =outputs.pooler_output if return_dict else outputs[1] lowerCamelCase_ =self.classifier(lowerCAmelCase ) lowerCamelCase_ =None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowerCamelCase_ ='''regression''' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowerCamelCase_ ='''single_label_classification''' else: lowerCamelCase_ ='''multi_label_classification''' if self.config.problem_type == "regression": lowerCamelCase_ =MSELoss() if self.num_labels == 1: lowerCamelCase_ =loss_fct(logits.squeeze(), labels.squeeze() ) else: lowerCamelCase_ =loss_fct(lowerCAmelCase, lowerCAmelCase ) elif self.config.problem_type == "single_label_classification": lowerCamelCase_ =CrossEntropyLoss() 
lowerCamelCase_ =loss_fct(logits.view(-1, self.num_labels ), labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowerCamelCase_ =BCEWithLogitsLoss() lowerCamelCase_ =loss_fct(lowerCAmelCase, lowerCAmelCase ) if not return_dict: lowerCamelCase_ =(logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase, logits=lowerCAmelCase, hidden_states=outputs.hidden_states )
676
"""Interleave the characters of two strings."""


def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Return the characters of *first_str* and *second_str* interleaved.

    Characters are taken alternately from each string; once the shorter
    string is exhausted, the remainder of the longer string is appended
    unchanged.

    >>> alternative_string_arrange("ABCD", "XY")
    'AXBYCD'
    >>> alternative_string_arrange("AB", "XYZ")
    'AXBYZ'
    """
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    # Walk up to the length of the longer string so no character is dropped.
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list[str] = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
676
1
"""Convert an original (CompVis-style) latent-diffusion UNet checkpoint to the
diffusers format.

Usage:
    python convert_ldm.py --checkpoint_path CKPT --config_file CONFIG.json --dump_path OUT_DIR
"""
import argparse
import json

import torch

from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """Drop the first `n_shave_prefix_segments` dot-separated segments of `path`.

    A negative value keeps only the leading segments instead (Python slice
    semantics on the segment list).
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Rename resnet parameter paths to diffusers conventions.

    Returns a list of ``{"old": ..., "new": ...}`` dicts, one per input path.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Rename attention parameter paths to diffusers conventions.

    Returns a list of ``{"old": ..., "new": ...}`` dicts, one per input path.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping


def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Copy tensors from `old_checkpoint` into `checkpoint` under renamed keys.

    `paths` is a list of ``{"old", "new"}`` dicts (from the ``renew_*`` helpers).
    `attention_paths_to_split` maps a fused qkv key to the three target keys it
    must be split into; `additional_replacements` are extra old->new substring
    substitutions applied to every new path.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Split fused qkv attention tensors into separate query/key/value tensors.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            # Biases are 1-D; weights are 3-D conv kernels flattened to (-1, channels).
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned by the qkv split above.
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here.
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear.
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    """Translate an LDM UNet state dict into the diffusers UNet2DModel layout."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only.
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only.
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only.
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    # Down blocks (input_blocks.0 is the conv_in handled above).
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        # A bare downsampler op: copy it directly and skip the resnet renaming.
        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    # Middle block: resnet, attention, resnet.
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    # Up blocks.
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]

        # Group the layer names by their sub-block index within the output block.
        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            paths = renew_resnet_paths(resnets)
            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            # Single-resnet output block: rename paths directly.
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
676
"""Lazy import structure for the TimmBackbone model."""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Modules importable without optional dependencies.
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

# The modeling module requires torch; register it only when torch is present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]


if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
1
"""Lazy import structure for the TrOCR model."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


# Modules importable without optional dependencies.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

# The modeling module requires torch; register it only when torch is present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
"""Levenshtein (edit) distance via memoized recursion."""
import functools


def a_(word1: str, word2: str) -> int:
    """Return the minimum number of single-character insertions, deletions,
    and substitutions needed to turn *word1* into *word2*.

    >>> a_("kitten", "sitting")
    3
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # First word exhausted: insert the rest of the second word.
        if index1 >= len_word1:
            return len_word2 - index2
        # Second word exhausted: delete the rest of the first word.
        if index2 >= len_word2:
            return len_word1 - index1
        # 0 when the current characters match, otherwise 1 (substitution cost).
        diff = int(word1[index1] != word2[index2])
        return min(
            1 + min_distance(index1 + 1, index2),       # delete from word1
            1 + min_distance(index1, index2 + 1),       # insert into word1
            diff + min_distance(index1 + 1, index2 + 1) # match / substitute
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
"""Image processor that rescales and symmetric-pads images to a multiple of a window size."""
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    """Prepares images for the model: optional rescaling to [0, 1] and symmetric
    padding so both spatial dimensions are multiples of `pad_size`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ):
        """
        Args:
            do_rescale: Whether to multiply pixel values by `rescale_factor`.
            rescale_factor: Scale applied when `do_rescale` is True.
            do_pad: Whether to pad images up to a multiple of `pad_size`.
            pad_size: The multiple each spatial dimension is padded to.
        """
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply `image` by `scale` (thin wrapper over the shared transform)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        """Symmetric-pad `image` on the bottom/right so height and width become
        multiples of `size`.

        NOTE(review): ``(old // size + 1) * size - old`` adds a full extra `size`
        of padding when the dimension is already a multiple of `size` — confirm
        whether that is intended before changing it.
        """
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply rescaling and padding to one image or a batch of images and
        return a `BatchFeature` with key ``pixel_values``.

        Per-call arguments override the defaults stored on the processor.
        """
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
676
"""Check whether a number is automorphic (its square ends in the number itself)."""


def a_(number: int) -> bool:
    """Return True if *number* is automorphic, i.e. ``number**2`` ends with the
    digits of *number* (5 -> 25, 76 -> 5776).

    Raises:
        TypeError: if *number* is not an int.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    # Negative numbers are never automorphic.
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number and its square one by one.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
"""Convert a fairseq/unilm WavLM checkpoint to the Hugging Face transformers format."""
import argparse

import torch

# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./   # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig

from transformers import WavLMConfig, WavLMModel, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps original (fairseq) parameter name fragments to transformers names;
# "*" is replaced with the encoder layer index at conversion time.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` into the attribute of `hf_pointer` addressed by dotted `key`.

    `weight_type` selects which parameter slot ("weight", "weight_g",
    "weight_v", "bias", or None for the module itself) receives the data.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    """Copy every tensor of the fairseq `state_dict` into `hf_model` using MAPPING.

    Conv feature-extractor weights are routed through `load_conv_layer`; any
    tensor that matches no mapping is collected and logged as unused.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits two segments before the matched key.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor tensor into `feature_extractor`.

    `type_id` 0 addresses the conv itself; `type_id` 2 addresses its layer norm
    (only the first layer when group norm is used).
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Load the original WavLM checkpoint, transfer its weights into a
    transformers `WavLMModel`, and save the result to `pytorch_dump_folder_path`.
    """
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)

    hf_wavlm.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
676
"""Sudoku solver using simple backtracking.

Fix: the original (obfuscated) file defined every function under the same
name ``a_`` (so only the last definition survived) and gave functions
duplicate ``__snake_case`` parameter names (a SyntaxError), while the call
sites used the real names.  The names below are restored from those call
sites (``is_safe``/``find_empty_location``/``sudoku``/``print_solution``).
"""
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` can be placed at (row, column) without
    clashing with the same row, the same column, or the enclosing 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    # (row - row % 3, column - column % 3) is the top-left corner of the box
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the (row, column) of the first empty cell (value 0), scanning
    row-major, or ``None`` when the grid is completely filled."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking.

    Returns the (mutated) solved grid, or ``None`` if no digit assignment
    satisfies the constraints.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # undo the tentative assignment and try the next digit
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid as nine space-separated rows."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
676
1
"""Measure average ONNX Runtime inference latency of a BERT-style ``model.onnx``.

Fixes over the obfuscated original: every module variable was assigned to the
same name ``a_`` while being *read* under its real name (``sess_opt``,
``sess``, ``batch``, ...) — a guaranteed NameError; the dtype ``np.intaa``
does not exist (restored to ``np.int64``, the dtype ONNX exporters emit for
token-id inputs); the loop variable no longer shadows the ``iter`` builtin.
"""
import os
import time

import numpy as np
import onnxruntime as ort

# NOTE(review): the original assigned these three bare flag strings to
# throwaway variables; they look like values intended for environment
# variables (e.g. ORT/TensorRT tuning flags) — confirm their destination.
FLAG_A = "1"
FLAG_B = "0"
FLAG_C = "1"

sess_opt = ort.SessionOptions()
# NOTE(review): the original computed ORT_DISABLE_ALL into a dead variable
# right before building the session; attaching it to the session options is
# the only sensible reading (benchmark the un-optimized graph) — confirm.
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL

print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
# BERT-style inputs: (batch, sequence) token ids / mask / segment ids
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
for _ in range(max_iters):
    sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )

print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
676
"""Informer model configuration.

Fixes over the obfuscated original: the ``__init__`` signature repeated the
parameter name ``lowerCAmelCase`` for every argument (a SyntaxError); the
base class ``lowerCamelCase__`` was undefined (the file imports
``PretrainedConfig``); the class attribute ``lowercase`` was defined twice
(should be ``model_type`` and ``attribute_map``); and the property was named
``lowercase__`` although the body reads ``self._number_of_features``.
Parameter names are reconstructed from the body's attribute assignments; the
default values are kept exactly as in the source, in the same order.
"""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """Configuration for an Informer time-series Transformer.

    Holds the time-series-specific settings (prediction/context lengths,
    lags, static/dynamic feature counts), the encoder/decoder architecture
    sizes, and the Informer-specific ProbSparse attention options
    (``attention_type``, ``sampling_factor``, ``distil``).
    """

    model_type = "informer"
    # map the generic config attribute names onto this model's native ones
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: Optional[List[int]] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        # fall back to the prediction length when no context length is given
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            # a single dummy category when no static categorical features exist
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # default heuristic: half the cardinality, capped at 50
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        # one lagged copy of the input per lag, plus the derived features below
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra input features fed alongside the lagged values."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
676
1
'''simple docstring''' # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). 
That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#

import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean

import pandas as pd
import torch
from tqdm import tqdm

import transformers

# sentinel for a failed/missing measurement
nan = float("nan")


class Tee:
    """Duplicate everything written to stdout into a report file.

    The file copy has tqdm's carriage-return progress lines stripped so the
    report stays readable.
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        # delegate everything else (flush, isatty, ...) to the real stdout
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))


def get_original_command(max_width=80, full_python_path=False):
    """Return the command line this script was invoked with, re-quoted and
    wrapped to ``max_width`` columns with shell line-continuations."""
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)


def get_base_command(args, output_dir):
    """Normalize ``args.base_cmd`` into an argv list, forcing our own
    ``--output_dir`` and ``--overwrite_output_dir``."""
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    """Run one benchmark subprocess and return the wanted metrics from its
    all_results.json (``{target_metric_key: nan}`` on failure)."""
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    """Run one variation ``repeat_times`` times and return its averaged metrics."""
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    # \33[2K\r erases the tqdm leftovers on the current console line
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}


def get_versions():
    """Return a human-readable software/hardware summary for the report."""
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""


def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    """Assemble and print the final report (github-markdown and console tables)."""
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd",
        default=None,
        type=str,
        required=True,
        help="Base cmd",
    )
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    # fix: the obfuscated original assigned Tee() to a throwaway local, so
    # stdout was never actually captured into the report file
    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
676
"""Project Euler 50: longest sum of consecutive primes below a ceiling.

Fixes over the obfuscated original: both functions were defined under the
same name ``a_`` while the bodies call ``prime_sieve`` and ``solution``
(restored from those call sites); the ``sol in primes`` membership test was
an O(n) scan over a list and now uses a set.
"""
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return all primes strictly below ``limit`` (requires ``limit >= 3``).

    Odd-only sieve of Eratosthenes: even indices are never consulted, so only
    odd composites need to be crossed out.
    """
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below ``ceiling`` that is the sum of the most
    consecutive primes.

    ``length`` holds the best run length found so far; the inner scan starts
    at ``i + length`` so only strictly-useful (equal-or-longer) windows are
    ever summed.
    """
    primes = prime_sieve(ceiling)
    prime_set = set(primes)  # O(1) membership instead of scanning the list
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in prime_set:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
676
1
'''Test suite for the BiT ("Big Transfer") model family: base model, backbone
and image-classification head.

NOTE(review): identifiers were machine-obfuscated (``__UpperCamelCase`` /
``lowercase__`` / ``lowerCAmelCase`` / ``lowerCamelCase_``).  Several
definitions use duplicate parameter names and read names the obfuscation
erased (``parent``, ``batch_size``, ...), so this block documents intent but
does not run as-is; de-obfuscation is required before use.'''
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image


class __UpperCamelCase :
    # Helper ("model tester") that builds tiny BiT configs and inputs so the
    # common tests below run quickly on CPU.
    def __init__( self, lowerCAmelCase, lowerCAmelCase=3, lowerCAmelCase=32, lowerCAmelCase=3, lowerCAmelCase=10, lowerCAmelCase=[8, 16, 32, 64], lowerCAmelCase=[1, 1, 2, 1], lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase="relu", lowerCAmelCase=3, lowerCAmelCase=None, lowerCAmelCase=["stage2", "stage3", "stage4"], lowerCAmelCase=[2, 3, 4], lowerCAmelCase=1, ):
        """Store the tiny-model hyperparameters used by every check below.

        NOTE(review): parameter names were obfuscated to the same identifier
        and the assignments read de-obfuscated names -- broken as-is.
        """
        lowerCamelCase_ =parent
        lowerCamelCase_ =batch_size
        lowerCamelCase_ =image_size
        lowerCamelCase_ =num_channels
        lowerCamelCase_ =embeddings_size
        lowerCamelCase_ =hidden_sizes
        lowerCamelCase_ =depths
        lowerCamelCase_ =is_training
        lowerCamelCase_ =use_labels
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =num_labels
        lowerCamelCase_ =scope
        # number of stages is derived from the depth list
        lowerCamelCase_ =len(lowerCAmelCase )
        lowerCamelCase_ =out_features
        lowerCamelCase_ =out_indices
        lowerCamelCase_ =num_groups

    def lowercase__ ( self ):
        """Create a random pixel-value batch, optional labels, and a config."""
        lowerCamelCase_ =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        lowerCamelCase_ =None
        if self.use_labels:
            lowerCamelCase_ =ids_tensor([self.batch_size], self.num_labels )

        lowerCamelCase_ =self.get_config()

        return config, pixel_values, labels

    def lowercase__ ( self ):
        """Build a BitConfig from the stored hyperparameters."""
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """Check the base model's last_hidden_state shape (stride-32 output)."""
        lowerCamelCase_ =BitModel(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =model(lowerCAmelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """Check the classification head returns (batch, num_labels) logits."""
        lowerCamelCase_ =self.num_labels
        lowerCamelCase_ =BitForImageClassification(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """Check the backbone's feature maps/channels, with and without out_features."""
        lowerCamelCase_ =BitBackbone(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =model(lowerCAmelCase )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )

        # verify channels
        self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )

        # verify backbone works with out_features=None
        lowerCamelCase_ =None
        lowerCamelCase_ =BitBackbone(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        lowerCamelCase_ =model(lowerCAmelCase )

        # verify feature maps: only the last stage is returned
        self.parent.assertEqual(len(result.feature_maps ), 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )

        # verify channels
        self.parent.assertEqual(len(model.channels ), 1 )
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )

    def lowercase__ ( self ):
        """Repackage config and inputs as the (config, inputs_dict) pair used by common tests."""
        lowerCamelCase_ =self.prepare_config_and_inputs()
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =config_and_inputs
        lowerCamelCase_ ={'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    """Common model tests for BiT (config, signatures, init, hidden states...).

    NOTE(review): the class-attribute names were obfuscated to ``lowercase``;
    they presumably correspond to ``all_model_classes``,
    ``pipeline_model_mapping`` and the usual boolean test switches -- confirm
    against the original transformers test file.
    """

    lowercase : List[str] =(BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    lowercase : Union[str, Any] =(
        {'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
        if is_torch_available()
        else {}
    )

    lowercase : List[Any] =False
    lowercase : int =False
    lowercase : Union[str, Any] =False
    lowercase : Optional[int] =False
    lowercase : Tuple =False

    def lowercase__ ( self ):
        """Set up the model tester and the config tester."""
        lowerCamelCase_ =BitModelTester(self )
        lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, has_text_modality=lowerCAmelCase )

    def lowercase__ ( self ):
        """Run the standard BitConfig round-trip/serialization checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowercase__ ( self ):
        # intentionally empty hook (no common-properties check for Bit)
        return

    @unittest.skip(reason='''Bit does not output attentions''' )
    def lowercase__ ( self ):
        pass

    @unittest.skip(reason='''Bit does not use inputs_embeds''' )
    def lowercase__ ( self ):
        pass

    @unittest.skip(reason='''Bit does not support input and output embeddings''' )
    def lowercase__ ( self ):
        pass

    def lowercase__ ( self ):
        """Check forward() exposes `pixel_values` as its first argument."""
        lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCamelCase_ =model_class(lowerCAmelCase )
            lowerCamelCase_ =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase_ =[*signature.parameters.keys()]

            lowerCamelCase_ =['''pixel_values''']
            self.assertListEqual(arg_names[:1], lowerCAmelCase )

    def lowercase__ ( self ):
        """Exercise the base-model shape check from the tester."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase )

    def lowercase__ ( self ):
        """Exercise the backbone checks from the tester."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*lowerCAmelCase )

    def lowercase__ ( self ):
        """Check norm layers are initialized to weight=1 / bias=0."""
        lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCamelCase_ =model_class(config=lowerCAmelCase )
            for name, module in model.named_modules():
                # NOTE(review): ``nn.BatchNormad`` looks like a mangled
                # ``nn.BatchNorm2d`` -- confirm against the original file.
                if isinstance(lowerCAmelCase, (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ),
                        msg=f'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0 ),
                        msg=f'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )

    def lowercase__ ( self ):
        """Check hidden-state outputs for both layer types and both config paths."""

        def check_hidden_states_output(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
            lowerCamelCase_ =model_class(lowerCAmelCase )
            model.to(lowerCAmelCase )
            model.eval()

            with torch.no_grad():
                lowerCamelCase_ =model(**self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) )

            lowerCamelCase_ =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            lowerCamelCase_ =self.model_tester.num_stages
            self.assertEqual(len(lowerCAmelCase ), expected_num_stages + 1 )

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common()

        lowerCamelCase_ =['''preactivation''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                lowerCamelCase_ =layer_type
                lowerCamelCase_ =True
                check_hidden_states_output(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                lowerCamelCase_ =True

                check_hidden_states_output(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )

    @unittest.skip(reason='''Bit does not use feedforward chunking''' )
    def lowercase__ ( self ):
        pass

    def lowercase__ ( self ):
        """Exercise the classification-head check from the tester."""
        lowerCamelCase_ =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )

    @slow
    def lowercase__ ( self ):
        """Smoke-test loading the first published checkpoint from the hub."""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ =BitModel.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )


def a_ ( ) -> int:
    """Load the fixed COCO test image used by the integration test."""
    lowerCamelCase_ =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
    """Integration test: run a real checkpoint on a real image and pin logits."""

    @cached_property
    def lowercase__ ( self ):
        """Image processor for the first published checkpoint (None without vision deps)."""
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )

    @slow
    def lowercase__ ( self ):
        """End-to-end forward pass: check logits shape and first three values."""
        lowerCamelCase_ =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase )

        lowerCamelCase_ =self.default_image_processor
        lowerCamelCase_ =prepare_img()
        lowerCamelCase_ =image_processor(images=lowerCAmelCase, return_tensors='''pt''' ).to(lowerCAmelCase )

        # forward pass
        with torch.no_grad():
            lowerCamelCase_ =model(**lowerCAmelCase )

        # verify the logits
        lowerCamelCase_ =torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape, lowerCAmelCase )

        lowerCamelCase_ =torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(lowerCAmelCase )

        self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCAmelCase, atol=1e-4 ) )


@require_torch
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
    """Backbone-specific tests driven by the BackboneTesterMixin."""

    lowercase : Any =(BitBackbone,) if is_torch_available() else ()
    lowercase : Union[str, Any] =BitConfig

    lowercase : Union[str, Any] =False

    def lowercase__ ( self ):
        """Set up the shared model tester."""
        lowerCamelCase_ =BitModelTester(self )
676
'''Unconditional DDIM image-generation pipeline.

NOTE(review): identifiers are machine-obfuscated; several reads
(``scheduler``, ``batch_size``, ``image``, ``output_type``...) reference
names the obfuscation erased, so this block documents intent but is not
runnable as-is.'''
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class __UpperCamelCase ( lowerCamelCase__ ):
    """Denoising pipeline: draws Gaussian noise and iteratively denoises it
    with a UNet under a DDIM scheduler, returning PIL images by default.
    """

    def __init__( self, lowerCAmelCase, lowerCAmelCase ):
        """Register the UNet and the (DDIM-converted) scheduler on the pipeline."""
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        lowerCamelCase_ =DDIMScheduler.from_config(scheduler.config )

        self.register_modules(unet=lowerCAmelCase, scheduler=lowerCAmelCase )

    @torch.no_grad()
    def __call__( self, lowerCAmelCase = 1, lowerCAmelCase = None, lowerCAmelCase = 0.0, lowerCAmelCase = 50, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, ):
        """Run the full denoising loop and return the generated images.

        Parameters follow the standard diffusers signature (batch size,
        generator(s), eta, number of inference steps, latents, output type,
        return_dict) -- the obfuscation removed the real names.
        """
        # Sample shape: square when sample_size is an int, otherwise use it verbatim.
        if isinstance(self.unet.config.sample_size, lowerCAmelCase ):
            lowerCamelCase_ =(
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            lowerCamelCase_ =(batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        # A list of generators must provide exactly one generator per sample.
        if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) != batch_size:
            raise ValueError(
                f'''You have passed a list of generators of length {len(lowerCAmelCase )}, but requested an effective batch'''
                f''' size of {batch_size}. Make sure the batch size matches the length of the generators.'''
            )

        lowerCamelCase_ =randn_tensor(lowerCAmelCase, generator=lowerCAmelCase, device=self.device, dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(lowerCAmelCase )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            lowerCamelCase_ =self.unet(lowerCAmelCase, lowerCAmelCase ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            lowerCamelCase_ =self.scheduler.step(
                lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, eta=lowerCAmelCase, use_clipped_model_output=lowerCAmelCase, generator=lowerCAmelCase
            ).prev_sample

        # Map from [-1, 1] to [0, 1] and NCHW tensor -> NHWC numpy for post-processing.
        lowerCamelCase_ =(image / 2 + 0.5).clamp(0, 1 )
        lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            lowerCamelCase_ =self.numpy_to_pil(lowerCAmelCase )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=lowerCAmelCase )
676
1
'''Configuration class for UniSpeechSat.

NOTE(review): identifiers are machine-obfuscated; the ``__init__`` parameter
list was collapsed to repeated ``lowerCAmelCase`` names while the assignments
read the de-obfuscated names (``hidden_size`` etc.), so this block documents
intent but is not runnable as-is.'''
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a_ : Tuple = logging.get_logger(__name__)

# NOTE(review): this rebinds the obfuscated name ``a_`` (the logger above) --
# in the original file these were two distinct module constants.
a_ : int = {
    """microsoft/unispeech-sat-base-100h-libri-ft""": (
        """https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"""
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}


class __UpperCamelCase ( lowerCamelCase__ ):
    # model_type identifier used by AutoConfig dispatch
    lowercase : List[Any] ='unispeech-sat'

    def __init__( self, lowerCAmelCase=32, lowerCAmelCase=768, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3_072, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-5, lowerCAmelCase="group", lowerCAmelCase="gelu", lowerCAmelCase=(512, 512, 512, 512, 512, 512, 512), lowerCAmelCase=(5, 2, 2, 2, 2, 2, 2), lowerCAmelCase=(10, 3, 3, 3, 3, 2, 2), lowerCAmelCase=False, lowerCAmelCase=128, lowerCAmelCase=16, lowerCAmelCase=False, lowerCAmelCase=True, lowerCAmelCase=0.0_5, lowerCAmelCase=10, lowerCAmelCase=2, lowerCAmelCase=0.0, lowerCAmelCase=10, lowerCAmelCase=0, lowerCAmelCase=320, lowerCAmelCase=2, lowerCAmelCase=0.1, lowerCAmelCase=100, lowerCAmelCase=256, lowerCAmelCase=256, lowerCAmelCase=0.1, lowerCAmelCase="mean", lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=256, lowerCAmelCase=(512, 512, 512, 512, 1_500), lowerCAmelCase=(5, 3, 3, 1, 1), lowerCAmelCase=(1, 2, 3, 1, 1), lowerCAmelCase=512, lowerCAmelCase=0, lowerCAmelCase=1, lowerCAmelCase=2, lowerCAmelCase=504, **lowerCAmelCase, ):
        """Build the UniSpeechSat configuration.

        Stores transformer sizes, convolutional feature-extractor settings,
        SpecAugment masking parameters, quantizer/pretraining parameters, CTC
        options, and head-specific sizes, then validates that the three
        convolutional-layer tuples have matching lengths.
        """
        super().__init__(**lowerCAmelCase, pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase )
        # transformer encoder sizes / activations
        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =feat_extract_norm
        lowerCamelCase_ =feat_extract_activation
        # convolutional feature-extractor description (dims, strides, kernels)
        lowerCamelCase_ =list(lowerCAmelCase )
        lowerCamelCase_ =list(lowerCAmelCase )
        lowerCamelCase_ =list(lowerCAmelCase )
        lowerCamelCase_ =conv_bias
        lowerCamelCase_ =num_conv_pos_embeddings
        lowerCamelCase_ =num_conv_pos_embedding_groups
        # number of feature-extractor layers is derived from conv_dim
        lowerCamelCase_ =len(self.conv_dim )
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =num_attention_heads
        # dropout probabilities
        lowerCamelCase_ =hidden_dropout
        lowerCamelCase_ =attention_dropout
        lowerCamelCase_ =activation_dropout
        lowerCamelCase_ =feat_proj_dropout
        lowerCamelCase_ =final_dropout
        lowerCamelCase_ =layerdrop
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =vocab_size
        lowerCamelCase_ =num_clusters
        lowerCamelCase_ =do_stable_layer_norm
        lowerCamelCase_ =use_weighted_layer_sum

        # the three conv tuples must describe the same number of layers
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.'''
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        lowerCamelCase_ =apply_spec_augment
        lowerCamelCase_ =mask_time_prob
        lowerCamelCase_ =mask_time_length
        lowerCamelCase_ =mask_time_min_masks
        lowerCamelCase_ =mask_feature_prob
        lowerCamelCase_ =mask_feature_length
        lowerCamelCase_ =mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        lowerCamelCase_ =num_codevectors_per_group
        lowerCamelCase_ =num_codevector_groups
        lowerCamelCase_ =contrastive_logits_temperature
        lowerCamelCase_ =feat_quantizer_dropout
        lowerCamelCase_ =num_negatives
        lowerCamelCase_ =codevector_dim
        lowerCamelCase_ =proj_codevector_dim
        lowerCamelCase_ =diversity_loss_weight

        # ctc loss
        lowerCamelCase_ =ctc_loss_reduction
        lowerCamelCase_ =ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        lowerCamelCase_ =classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        lowerCamelCase_ =list(lowerCAmelCase )
        lowerCamelCase_ =list(lowerCAmelCase )
        lowerCamelCase_ =list(lowerCAmelCase )
        lowerCamelCase_ =xvector_output_dim

    @property
    def lowercase__ ( self ):
        """Total downsampling stride of the feature extractor (product of conv strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1 )
676
'''Twin-prime finder: the twin prime of n is n + 2 when both are prime.'''
from maths.prime_check import is_prime


def a_ ( number : int ) -> int:
    """Return the twin prime of ``number``.

    The twin prime of n is n + 2, provided both n and n + 2 are prime.

    :param number: candidate lower member of a twin-prime pair
    :return: ``number + 2`` if both numbers are prime, otherwise ``-1``
    :raises TypeError: if ``number`` is not an integer
    """
    # bugfix: the original called isinstance(number, number) -- the second
    # argument must be a type, so *every* call raised TypeError -- and built
    # the error message from an undefined name.  Check against ``int`` and
    # reference the actual parameter instead.
    if not isinstance(number , int ):
        msg =f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
'''Bitwise AND of two non-negative integers, rendered as a zero-padded
binary string with a "0b" prefix.'''


def a_ ( a : int , b : int ) -> str:
    """Return ``a & b`` as a "0b"-prefixed binary string.

    The digits are zero-padded to the bit width of the wider operand, e.g.
    ``a_(25, 32)`` is ``"0b000000"`` (six digits, the width of 32).

    :param a: first non-negative operand
    :param b: second non-negative operand
    :return: padded binary representation of ``a & b``
    :raises ValueError: if either input is negative
    """
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''' )
    # width of the wider operand, so the output keeps its leading zeros
    width = max(len(bin(a ) ) - 2, len(bin(b ) ) - 2 )
    # idiom fix: let the interpreter AND the integers instead of zfill-ing two
    # digit strings and comparing them character by character
    return "0b" + format(a & b, "b" ).zfill(width )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
'''Variance-exploding (VE) SDE scheduler (score-based generative modeling).

NOTE(review): identifiers are machine-obfuscated; many reads (``sample``,
``timestep``, ``sigma`` ...) reference names the obfuscation erased, so this
block documents intent but is not runnable as-is.'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class __UpperCamelCase ( lowerCamelCase__ ):
    """Output of a predictor step: the previous sample and its pre-noise mean."""

    lowercase : torch.FloatTensor
    lowercase : torch.FloatTensor


class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    """Scheduler implementing the VE-SDE predictor/corrector sampling loop."""

    # solver order (first-order predictor)
    lowercase : Tuple =1

    @register_to_config
    def __init__( self, lowerCAmelCase = 2_000, lowerCAmelCase = 0.1_5, lowerCAmelCase = 0.0_1, lowerCAmelCase = 1_3_4_8.0, lowerCAmelCase = 1e-5, lowerCAmelCase = 1, ):
        """Store sigma_max and precompute the sigma/timestep schedules.

        Defaults correspond to num_train_timesteps=2000, snr=0.15,
        sigma_min=0.01, sigma_max=1348.0, sampling_eps=1e-5.
        """
        # standard deviation of the initial noise distribution
        lowerCamelCase_ =sigma_max

        # setable values
        lowerCamelCase_ =None

        self.set_sigmas(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """No input scaling is needed for this scheduler; return the sample unchanged."""
        return sample

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None ):
        """Create the (descending) continuous timestep grid from 1 to sampling_eps."""
        lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps

        lowerCamelCase_ =torch.linspace(1, lowerCAmelCase, lowerCAmelCase, device=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None ):
        """Build the geometric sigma schedule (and timesteps if not yet set)."""
        lowerCamelCase_ =sigma_min if sigma_min is not None else self.config.sigma_min
        lowerCamelCase_ =sigma_max if sigma_max is not None else self.config.sigma_max
        lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(lowerCAmelCase, lowerCAmelCase )

        # geometric interpolation between sigma_min and sigma_max
        lowerCamelCase_ =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        lowerCamelCase_ =torch.exp(torch.linspace(math.log(lowerCAmelCase ), math.log(lowerCAmelCase ), lowerCAmelCase ) )
        lowerCamelCase_ =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """Sigma of the previous discrete step (zero at the first step)."""
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device ) ),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device ),
        )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ):
        """Predictor step: move the sample one SDE step toward t=0."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler'''
            )

        lowerCamelCase_ =timestep * torch.ones(
            sample.shape[0], device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        lowerCamelCase_ =(timestep * (len(self.timesteps ) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        lowerCamelCase_ =timesteps.to(self.discrete_sigmas.device )

        lowerCamelCase_ =self.discrete_sigmas[timesteps].to(sample.device )
        lowerCamelCase_ =self.get_adjacent_sigma(lowerCAmelCase, lowerCAmelCase ).to(sample.device )
        lowerCamelCase_ =torch.zeros_like(lowerCAmelCase )
        lowerCamelCase_ =(sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        lowerCamelCase_ =diffusion.flatten()
        while len(diffusion.shape ) < len(sample.shape ):
            lowerCamelCase_ =diffusion.unsqueeze(-1 )
        lowerCamelCase_ =drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        lowerCamelCase_ =randn_tensor(
            sample.shape, layout=sample.layout, generator=lowerCAmelCase, device=sample.device, dtype=sample.dtype )
        lowerCamelCase_ =sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        lowerCamelCase_ =prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=lowerCAmelCase, prev_sample_mean=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ):
        """Corrector step: Langevin-style correction with an SNR-derived step size."""
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler'''
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        lowerCamelCase_ =randn_tensor(sample.shape, layout=sample.layout, generator=lowerCAmelCase ).to(sample.device )

        # compute step size from the model_output, the noise, and the snr
        lowerCamelCase_ =torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
        lowerCamelCase_ =torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
        lowerCamelCase_ =(self.config.snr * noise_norm / grad_norm) ** 2 * 2
        lowerCamelCase_ =step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        lowerCamelCase_ =step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            lowerCamelCase_ =step_size.unsqueeze(-1 )
        lowerCamelCase_ =sample + step_size * model_output
        lowerCamelCase_ =prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """Forward-diffuse clean samples with the sigma matching each timestep."""
        lowerCamelCase_ =timesteps.to(original_samples.device )
        lowerCamelCase_ =self.discrete_sigmas.to(original_samples.device )[timesteps]
        # scale (or freshly draw) noise by the per-sample sigma
        lowerCamelCase_ =(
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
        )
        lowerCamelCase_ =noise + original_samples
        return noisy_samples

    def __len__( self ):
        """Number of training timesteps configured for this scheduler."""
        return self.config.num_train_timesteps
676
1
'''Marian model configuration and its ONNX export configuration.

NOTE(review): identifiers are machine-obfuscated; the ``__init__`` parameter
list was collapsed to repeated ``lowerCAmelCase`` names while the bodies read
the de-obfuscated names, so this block documents intent but is not runnable
as-is.'''
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


a_ : str = logging.get_logger(__name__)

a_ : Tuple = {
    """Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class __UpperCamelCase ( lowerCamelCase__ ):
    """Configuration for Marian encoder-decoder translation models."""

    # model_type identifier used by AutoConfig dispatch
    lowercase : Optional[Any] ='marian'
    # past_key_values cannot be serialized into the config JSON
    lowercase : Union[str, Any] =['past_key_values']
    lowercase : List[Any] ={'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__( self, lowerCAmelCase=58_101, lowerCAmelCase=None, lowerCAmelCase=1_024, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=12, lowerCAmelCase=4_096, lowerCAmelCase=16, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase="gelu", lowerCAmelCase=1_024, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=58_100, lowerCAmelCase=False, lowerCAmelCase=58_100, lowerCAmelCase=0, lowerCAmelCase=0, lowerCAmelCase=True, **lowerCAmelCase, ):
        """Store encoder/decoder sizes, dropouts and generation token ids."""
        lowerCamelCase_ =vocab_size
        # decoder may use its own vocabulary; fall back to the shared one
        lowerCamelCase_ =decoder_vocab_size or vocab_size
        lowerCamelCase_ =max_position_embeddings
        lowerCamelCase_ =d_model
        lowerCamelCase_ =encoder_ffn_dim
        lowerCamelCase_ =encoder_layers
        lowerCamelCase_ =encoder_attention_heads
        lowerCamelCase_ =decoder_ffn_dim
        lowerCamelCase_ =decoder_layers
        lowerCamelCase_ =decoder_attention_heads
        lowerCamelCase_ =dropout
        lowerCamelCase_ =attention_dropout
        lowerCamelCase_ =activation_dropout
        lowerCamelCase_ =activation_function
        lowerCamelCase_ =init_std
        lowerCamelCase_ =encoder_layerdrop
        lowerCamelCase_ =decoder_layerdrop
        lowerCamelCase_ =use_cache
        lowerCamelCase_ =encoder_layers
        lowerCamelCase_ =scale_embedding  # scale factor will be sqrt(d_model) if True
        lowerCamelCase_ =share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=lowerCAmelCase,
            eos_token_id=lowerCAmelCase,
            is_encoder_decoder=lowerCAmelCase,
            decoder_start_token_id=lowerCAmelCase,
            forced_eos_token_id=lowerCAmelCase,
            **lowerCAmelCase,
        )


class __UpperCamelCase ( lowerCamelCase__ ):
    """ONNX export configuration for Marian (seq2seq, causal-lm, default tasks)."""

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def lowercase__ ( self ):
        """Dynamic-axis description of the model inputs for each export task."""
        if self.task in ["default", "seq2seq-lm"]:
            lowerCamelCase_ =OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ]
            )

            if self.use_past:
                lowerCamelCase_ ={0: '''batch'''}
                lowerCamelCase_ ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                lowerCamelCase_ ={0: '''batch''', 1: '''decoder_sequence'''}
                lowerCamelCase_ ={0: '''batch''', 1: '''decoder_sequence'''}

            if self.use_past:
                self.fill_with_past_key_values_(lowerCAmelCase, direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            lowerCamelCase_ =OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ]
            )
            if self.use_past:
                lowerCamelCase_, lowerCamelCase_ =self.num_layers
                for i in range(lowerCAmelCase ):
                    lowerCamelCase_ ={0: '''batch''', 2: '''past_sequence + sequence'''}
                    lowerCamelCase_ ={0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            lowerCamelCase_ =OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def lowercase__ ( self ):
        """Dynamic-axis description of the model outputs for each export task."""
        if self.task in ["default", "seq2seq-lm"]:
            lowerCamelCase_ =super().outputs
        else:
            lowerCamelCase_ =super(lowerCAmelCase, self ).outputs
            if self.use_past:
                lowerCamelCase_, lowerCamelCase_ =self.num_layers
                for i in range(lowerCAmelCase ):
                    lowerCamelCase_ ={0: '''batch''', 2: '''past_sequence + sequence'''}
                    lowerCamelCase_ ={0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = -1, lowerCAmelCase = -1, lowerCAmelCase = False, lowerCAmelCase = None, ):
        """Build dummy encoder+decoder inputs (and past_key_values) for seq2seq export."""
        lowerCamelCase_ =self._generate_dummy_inputs_for_encoder_and_decoder(
            lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )

        # Generate decoder inputs
        lowerCamelCase_ =seq_length if not self.use_past else 1
        lowerCamelCase_ =self._generate_dummy_inputs_for_encoder_and_decoder(
            lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
        lowerCamelCase_ ={f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
        lowerCamelCase_ =dict(**lowerCAmelCase, **lowerCAmelCase )

        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            lowerCamelCase_, lowerCamelCase_ =common_inputs['''input_ids'''].shape
            lowerCamelCase_ =common_inputs['''decoder_input_ids'''].shape[1]
            lowerCamelCase_, lowerCamelCase_ =self.num_attention_heads
            lowerCamelCase_ =(
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            lowerCamelCase_ =decoder_seq_length + 3
            lowerCamelCase_ =(
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            lowerCamelCase_ =torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(lowerCAmelCase, lowerCAmelCase )], dim=1 )

            lowerCamelCase_ =[]
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            lowerCamelCase_, lowerCamelCase_ =self.num_layers
            lowerCamelCase_ =min(lowerCAmelCase, lowerCAmelCase )
            lowerCamelCase_ =max(lowerCAmelCase, lowerCAmelCase ) - min_num_layers
            lowerCamelCase_ ='''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''

            for _ in range(lowerCAmelCase ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(lowerCAmelCase ),
                        torch.zeros(lowerCAmelCase ),
                        torch.zeros(lowerCAmelCase ),
                        torch.zeros(lowerCAmelCase ),
                    )
                )
            # TODO: test this.
            lowerCamelCase_ =encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(lowerCAmelCase, lowerCAmelCase ):
                common_inputs["past_key_values"].append((torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) )
        return common_inputs

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = -1, lowerCAmelCase = -1, lowerCAmelCase = False, lowerCAmelCase = None, ):
        """Build dummy inputs (and past_key_values) for causal-lm export."""
        lowerCamelCase_ =self._generate_dummy_inputs_for_encoder_and_decoder(
            lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )

        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            lowerCamelCase_, lowerCamelCase_ =common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            lowerCamelCase_ =seqlen + 2
            lowerCamelCase_, lowerCamelCase_ =self.num_layers
            lowerCamelCase_, lowerCamelCase_ =self.num_attention_heads
            lowerCamelCase_ =(
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            lowerCamelCase_ =common_inputs['''attention_mask'''].dtype
            lowerCamelCase_ =torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(lowerCAmelCase, lowerCAmelCase, dtype=lowerCAmelCase )], dim=1 )
            lowerCamelCase_ =[
                (torch.zeros(lowerCAmelCase ), torch.zeros(lowerCAmelCase )) for _ in range(lowerCAmelCase )
            ]
        return common_inputs

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = -1, lowerCAmelCase = -1, lowerCAmelCase = False, lowerCAmelCase = None, ):
        """Tokenize dummy text into model inputs with effective (fixed) axis sizes."""
        lowerCamelCase_ =compute_effective_axis_dimension(
            lowerCAmelCase, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        lowerCamelCase_ =tokenizer.num_special_tokens_to_add(lowerCAmelCase )
        lowerCamelCase_ =compute_effective_axis_dimension(
            lowerCAmelCase, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=lowerCAmelCase )

        # Generate dummy inputs according to compute batch and sequence
        lowerCamelCase_ =[''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        lowerCamelCase_ =dict(tokenizer(lowerCAmelCase, return_tensors=lowerCAmelCase ) )
        return common_inputs

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = -1, lowerCAmelCase = -1, lowerCAmelCase = False, lowerCAmelCase = None, ):
        """Dispatch dummy-input generation to the task-specific helper."""
        if self.task in ["default", "seq2seq-lm"]:
            lowerCamelCase_ =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                lowerCAmelCase, batch_size=lowerCAmelCase, seq_length=lowerCAmelCase, is_pair=lowerCAmelCase, framework=lowerCAmelCase )

        else:
            lowerCamelCase_ =self._generate_dummy_inputs_for_causal_lm(
                lowerCAmelCase, batch_size=lowerCAmelCase, seq_length=lowerCAmelCase, is_pair=lowerCAmelCase, framework=lowerCAmelCase )

        return common_inputs

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """Flatten past_key_values with the base class matching the export task."""
        if self.task in ["default", "seq2seq-lm"]:
            lowerCamelCase_ =super()._flatten_past_key_values_(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
        else:
            lowerCamelCase_ =super(lowerCAmelCase, self )._flatten_past_key_values_(
                lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )

    @property
    def lowercase__ ( self ):
        """Absolute tolerance used when validating the exported ONNX model."""
        return 1e-4
676
'''FizzBuzz transcript generator.'''


def a_(number: int, iterations: int) -> str:
    """Play FizzBuzz from ``number`` up to and including ``iterations``.

    Multiples of 3 become ``Fizz``, multiples of 5 become ``Buzz``,
    multiples of both become ``FizzBuzz``; every other value is emitted
    verbatim. Each entry is followed by a single space.

    :param number: first value to play; must be an integer >= 1
    :param iterations: last value to play; must be an integer >= 1
    :return: the space-separated FizzBuzz transcript (trailing space kept)
    :raises ValueError: if either argument is not a positive integer
    """
    if not isinstance(iterations, int):
        raise ValueError('''iterations must be defined as integers''')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            '''starting number must be an integer and be more than 0''')
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += 'Fizz'
        if number % 5 == 0:
            out += 'Buzz'
        # Neither divisor matched: emit the number itself.
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += ' '
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
'''Benchmark iterating over a `datasets.Dataset` with several access patterns
(single rows, batches, and formatted views). Timings are written as JSON
next to this script under ``results/``.'''
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    """Read `length` examples one row at a time."""
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    """Read the whole dataset in slices of `batch_size` rows."""
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    """Read `length` rows with the dataset formatted as `type` (numpy/pandas/torch/...)."""
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    """Read `length` rows in `batch_size` slices with the dataset formatted as `type`."""
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    """Generate a synthetic dataset, time every access pattern before and
    after shuffling, and dump the timings to ``RESULTS_FILE_PATH``."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            # key the timing by the function name and its arguments
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
676
'''simple docstring''' from typing import List import numpy as np def a_ ( __snake_case : dict ) -> int: """simple docstring""" lowerCamelCase_ ={key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) lowerCamelCase_ =max(lists_lengths.values() , default=0 ) return max(1 , __snake_case ) def a_ ( __snake_case : int , __snake_case : int ) -> List[range]: """simple docstring""" lowerCamelCase_ =[] for group_idx in range(__snake_case ): lowerCamelCase_ =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowerCamelCase_ =shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowerCamelCase_ =range(__snake_case , start + num_shards_to_add ) shards_indices_per_group.append(__snake_case ) return shards_indices_per_group def a_ ( __snake_case : dict , __snake_case : int ) -> List[dict]: """simple docstring""" lowerCamelCase_ =_number_of_shards_in_gen_kwargs(__snake_case ) if num_shards == 1: return [dict(__snake_case )] else: lowerCamelCase_ =_distribute_shards(num_shards=__snake_case , max_num_jobs=__snake_case ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__snake_case , __snake_case ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__snake_case ) ) ] def a_ ( __snake_case : List[dict] ) -> dict: """simple docstring""" 
return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __snake_case ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def a_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> dict: """simple docstring""" lowerCamelCase_ ={len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )} lowerCamelCase_ ={} for size in list_sizes: lowerCamelCase_ =list(range(__snake_case ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowerCamelCase_ =dict(__snake_case ) for key, value in shuffled_kwargs.items(): if isinstance(__snake_case , __snake_case ): lowerCamelCase_ =[value[i] for i in indices_per_size[len(__snake_case )]] return shuffled_kwargs
676
1
'''Visual question answering pipeline: takes an image and a free-form
question about it and returns the top scoring answers.'''
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class __UpperCamelCase(Pipeline):
    """Pipeline for visual question answering (e.g. ViLT-style models).

    Accepts either a single ``image``/``question`` pair or a list of
    ``{"image": ..., "question": ...}`` dicts; each prediction is a list of
    ``{"score", "answer"}`` dicts sorted by score.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        # Route user kwargs to the preprocess / postprocess stages.
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        """Answer ``question`` about ``image``.

        ``image`` may also be a dict (or list of dicts) with "image" and
        "question" keys, in which case ``question`` is ignored.
        """
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Batched input: pass the user-supplied structure through.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            # Multi-label head: independent sigmoid per answer.
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
676
'''Preprocess a raw text dump into token-id sequences and pickle them, so the
(tokenization + token_to_ids) pass is not redone on every training run.'''
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    """Tokenize every line of ``--file_path`` with the chosen tokenizer and
    pickle the shuffled list of id arrays to ``--dump_file``.<tokenizer>.pickle."""
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    count = 0
    interval = 10000
    start = time.time()
    for text in data:
        # Wrap each line with the tokenizer's BOS/SEP markers ourselves,
        # so encode() must not add special tokens again.
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        count += 1
        if count % interval == 0:
            end = time.time()
            logger.info(f"{count} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(rslt)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # uint16 is enough when the vocabulary fits in 16 bits; halves the dump size.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
676
1
'''Release helper: bumps the library version everywhere it appears
(package __init__, setup.py, example scripts) before/after a release.'''
import argparse
import os
import re

import packaging.version

PATH_TO_EXAMPLES = "examples/"
# For each location kind: (regex that finds the version line, replacement template).
REPLACE_PATTERNS = {
    "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
    "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version string in `fname` using the regex registered under `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the `check_min_version` calls in every maintained example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere; examples are skipped for patch releases."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the `main` docs by stable-release links in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Compute the release version (asking the user to confirm) and apply it."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Bump to the next dev version (asking the user to confirm) after a release."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
676
'''MVP model configuration.'''
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class __UpperCamelCase(PretrainedConfig):
    """Configuration for an MVP encoder-decoder model.

    Defaults reproduce the `RUCAIBox/mvp` checkpoint. The optional prompt
    parameters (`use_prompt`, `prompt_length`, `prompt_mid_dim`) enable
    lightweight prompt tuning.
    """

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_267,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Backward compatibility with the legacy `force_bos_token_to_be_generated` flag.
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
676
1
'''End-to-end tests for the RAG fine-tuning example script (finetune_rag.py).'''
import json
import logging
import os
import sys
from pathlib import Path

import finetune_rag

from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    require_ray,
    require_torch_gpu,
    require_torch_multi_gpu,
)

logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class __UpperCamelCase(TestCasePlus):
    """Runs finetune_rag.py as a subprocess on a tiny synthetic dataset and
    checks the exact-match metric it reports."""

    def _create_dummy_data(self, data_dir):
        # Tiny seq2seq dataset: one (source, target) pair repeated per split.
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        """Run one short fine-tuning job and return the parsed metrics.json."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
            """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
676
'''SentencePiece tokenizer for the BertGeneration model.'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}


class __UpperCamelCase(PreTrainedTokenizer):
    """BertGeneration tokenizer backed by a SentencePiece model file.

    The underlying `SentencePieceProcessor` is not picklable, so it is
    dropped in `__getstate__` and rebuilt from `vocab_file` in `__setstate__`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # The SentencePieceProcessor cannot be pickled; drop it and reload on unpickle.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
676
1
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() a_ : List[str] = logging.get_logger(__name__) a_ : Optional[Any] = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """adapter_layer""": """encoder.layers.*.adapter_layer""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """mask_emb""": """masked_spec_embed""", """pooling_layer.linear""": """projector""", """pooling_layer.projection""": """classifier""", } a_ : Dict = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """projector""", """classifier""", ] def a_ ( __snake_case : str ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ ={} with open(__snake_case , '''r''' ) as file: for line_number, line in enumerate(__snake_case ): 
# NOTE(review): identifiers in this chunk appear machine-mangled — every
# assignment target is `lowerCamelCase_` and every parameter/argument is
# `__snake_case`, while right-hand sides still read the original names
# (`words`, `full_name`, `hf_pointer`, ...). The code is reproduced verbatim
# (tokens unchanged, indentation reconstructed best-effort); the original
# bindings must be restored before this script can run.

# Tail of a function whose definition starts before this chunk (judging by the
# reads below it folds a text file into a {line_number: first_word} mapping);
# indentation here is a best-effort reconstruction.
lowerCamelCase_ =line.strip()
            if line:
                lowerCamelCase_ =line.split()
                lowerCamelCase_ =line_number
                lowerCamelCase_ =words[0]
                lowerCamelCase_ =value
    return result


def a_ ( __snake_case : Any , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : List[str] ) -> List[str]:
    """Walk the dot-separated `key` into the HF model, validate the tensor
    shape against `value`, then copy `value` into the matching slot
    (weight / weight_g / weight_v / bias / mapped "param")."""
    # Descend attribute-by-attribute to the target sub-module.
    for attribute in key.split('''.''' ):
        lowerCamelCase_ =getattr(__snake_case , __snake_case )
    lowerCamelCase_ =None
    # A name ending in a PARAM_MAPPING key is routed through the "param" path.
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(__snake_case ):
            lowerCamelCase_ =PARAM_MAPPING[full_name.split('''.''' )[-1]]
            lowerCamelCase_ ='''param'''
    if weight_type is not None and weight_type != "param":
        lowerCamelCase_ =getattr(__snake_case , __snake_case ).shape
    elif weight_type is not None and weight_type == "param":
        # For mapped params, walk one level further to find the target shape.
        lowerCamelCase_ =hf_pointer
        for attribute in hf_param_name.split('''.''' ):
            lowerCamelCase_ =getattr(__snake_case , __snake_case )
        lowerCamelCase_ =shape_pointer.shape
        # let's reduce dimension
        lowerCamelCase_ =value[0]
    else:
        lowerCamelCase_ =hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        lowerCamelCase_ =value
    elif weight_type == "weight_g":
        lowerCamelCase_ =value
    elif weight_type == "weight_v":
        lowerCamelCase_ =value
    elif weight_type == "bias":
        lowerCamelCase_ =value
    elif weight_type == "param":
        for attribute in hf_param_name.split('''.''' ):
            lowerCamelCase_ =getattr(__snake_case , __snake_case )
        lowerCamelCase_ =value
    else:
        lowerCamelCase_ =value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )


def a_ ( __snake_case : int , __snake_case : Tuple , __snake_case : Dict , __snake_case : int , __snake_case : Dict ) -> Dict:
    """Dict-based variant of the setter above: record the renamed flat key ->
    tensor into a dict instead of assigning onto a model."""
    lowerCamelCase_ =None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(__snake_case ):
            lowerCamelCase_ =PARAM_MAPPING[full_name.split('''.''' )[-1]]
            lowerCamelCase_ ='''param'''
    if weight_type is not None and weight_type != "param":
        lowerCamelCase_ ='''.'''.join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        lowerCamelCase_ ='''.'''.join([key, hf_param_name] )
    else:
        lowerCamelCase_ =key
    # lm_head keeps the full tensor; everything else is dimension-reduced.
    lowerCamelCase_ =value if '''lm_head''' in full_key else value[0]


# Adapter-layer rename table: fairseq parameter name -> HF parameter name.
a_ : Tuple = {
    """W_a""": """linear_1.weight""",
    """W_b""": """linear_2.weight""",
    """b_a""": """linear_1.bias""",
    """b_b""": """linear_2.bias""",
    """ln_W""": """norm.weight""",
    """ln_b""": """norm.bias""",
}


def a_ ( __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Any=None , __snake_case : int=None ) -> List[str]:
    """Try to match one fairseq weight name against MAPPING and either record
    it into `hf_dict` or set it recursively on the model; returns whether the
    name matched anything."""
    lowerCamelCase_ =False
    for key, mapped_key in MAPPING.items():
        lowerCamelCase_ ='''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
            lowerCamelCase_ =True
            if "*" in mapped_key:
                # Fill the wildcard with the layer index parsed from the name.
                lowerCamelCase_ =name.split(__snake_case )[0].split('''.''' )[-2]
                lowerCamelCase_ =mapped_key.replace('''*''' , __snake_case )
            if "weight_g" in name:
                lowerCamelCase_ ='''weight_g'''
            elif "weight_v" in name:
                lowerCamelCase_ ='''weight_v'''
            elif "bias" in name:
                lowerCamelCase_ ='''bias'''
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                lowerCamelCase_ ='''weight'''
            else:
                lowerCamelCase_ =None
            if hf_dict is not None:
                rename_dict(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
            else:
                set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
            return is_used
    return is_used


def a_ ( __snake_case : Dict , __snake_case : Tuple , __snake_case : str ) -> List[Any]:
    """Copy every tensor of the fairseq state dict into the HF model,
    dispatching conv-feature-extractor tensors to the conv loader and
    warning about weights that matched nothing."""
    lowerCamelCase_ =[]
    lowerCamelCase_ =fairseq_model.state_dict()
    lowerCamelCase_ =hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        lowerCamelCase_ =False
        if "conv_layers" in name:
            load_conv_layer(
                __snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == '''group''' , )
            lowerCamelCase_ =True
        else:
            lowerCamelCase_ =load_wavaveca_layer(__snake_case , __snake_case , __snake_case )
        if not is_used:
            unused_weights.append(__snake_case )
    logger.warning(F'''Unused weights: {unused_weights}''' )


def a_ ( __snake_case : List[Any] , __snake_case : Dict , __snake_case : Any , __snake_case : List[str] , __snake_case : Tuple ) -> Union[str, Any]:
    """Copy a single feature-extractor conv-layer tensor (conv weight/bias or
    layer-norm weight/bias), validating its shape first; anything else is
    appended to `unused_weights`."""
    lowerCamelCase_ =full_name.split('''conv_layers.''' )[-1]
    lowerCamelCase_ =name.split('''.''' )
    lowerCamelCase_ =int(items[0] )
    lowerCamelCase_ =int(items[1] )
    # type_id 0 -> conv weight/bias; type_id 2 -> layer-norm (only valid for
    # layer 0 when group norm is used).
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            lowerCamelCase_ =value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            lowerCamelCase_ =value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            lowerCamelCase_ =value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            lowerCamelCase_ =value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(__snake_case )


@torch.no_grad()
def a_ ( __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Optional[int]=None , __snake_case : Any=None , __snake_case : Tuple=True , __snake_case : Union[str, Any]=False ) -> str:
    """Entry point: build the HF model (CTC / sequence-classification /
    pre-training variant), load the fairseq checkpoint into it, and save the
    converted model (plus tokenizer/feature-extractor when fine-tuned)."""
    if config_path is not None:
        lowerCamelCase_ =WavaVecaConfig.from_pretrained(__snake_case )
    else:
        lowerCamelCase_ =WavaVecaConfig()
    if is_seq_class:
        lowerCamelCase_ =read_txt_into_dict(__snake_case )
        lowerCamelCase_ =idalabel
        lowerCamelCase_ =WavaVecaForSequenceClassification(__snake_case )
        lowerCamelCase_ =WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
        feature_extractor.save_pretrained(__snake_case )
    elif is_finetuned:
        if dict_path:
            lowerCamelCase_ =Dictionary.load(__snake_case )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            lowerCamelCase_ =target_dict.pad_index
            lowerCamelCase_ =target_dict.bos_index
            lowerCamelCase_ =target_dict.eos_index
            lowerCamelCase_ =len(target_dict.symbols )
            lowerCamelCase_ =os.path.join(__snake_case , '''vocab.json''' )
            if not os.path.isdir(__snake_case ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__snake_case ) )
                return
            os.makedirs(__snake_case , exist_ok=__snake_case )
            lowerCamelCase_ =target_dict.indices
            # fairseq has the <pad> and <s> switched
            lowerCamelCase_ =0
            lowerCamelCase_ =1
            with open(__snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(__snake_case , __snake_case )
            lowerCamelCase_ =WavaVecaCTCTokenizer(
                __snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__snake_case , )
            lowerCamelCase_ =True if config.feat_extract_norm == '''layer''' else False
            lowerCamelCase_ =WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
            lowerCamelCase_ =WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
            processor.save_pretrained(__snake_case )
        lowerCamelCase_ =WavaVecaForCTC(__snake_case )
    else:
        lowerCamelCase_ =WavaVecaForPreTraining(__snake_case )
    # Fine-tuned checkpoints need the dict directory as a data override;
    # pre-training checkpoints need an explicit fairseq task.
    if is_finetuned or is_seq_class:
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        lowerCamelCase_ =argparse.Namespace(task='''audio_pretraining''' )
        lowerCamelCase_ =fairseq.tasks.setup_task(__snake_case )
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__snake_case )
    lowerCamelCase_ =model[0].eval()
    recursively_load_weights(__snake_case , __snake_case , not is_finetuned )
    hf_wavavec.save_pretrained(__snake_case )


if __name__ == "__main__":
    a_ : Union[str, Any] = argparse.ArgumentParser()
    a_.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") if False else None  # NOTE(review): placeholder removed below — see verbatim lines
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    parser.add_argument(
        """--is_seq_class""",
        action="""store_true""",
        help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
    )
    a_ : Any = parser.parse_args()
    a_ : Tuple = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
676
'''simple docstring''' from collections.abc import Sequence def a_ ( __snake_case : Sequence[float] , __snake_case : float ) -> float: """simple docstring""" return sum(c * (x**i) for i, c in enumerate(__snake_case ) ) def a_ ( __snake_case : Sequence[float] , __snake_case : float ) -> float: """simple docstring""" lowerCamelCase_ =0.0 for coeff in reversed(__snake_case ): lowerCamelCase_ =result * x + coeff return result if __name__ == "__main__": a_ : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0) a_ : Tuple = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
676
1
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging a_ : Optional[int] = logging.get_logger(__name__) a_ : Tuple = { """Visual-Attention-Network/van-base""": ( """https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json""" ), } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : int ='van' def __init__( self, lowerCAmelCase=224, lowerCAmelCase=3, lowerCAmelCase=[7, 3, 3, 3], lowerCAmelCase=[4, 2, 2, 2], lowerCAmelCase=[64, 128, 320, 512], lowerCAmelCase=[3, 3, 12, 3], lowerCAmelCase=[8, 8, 4, 4], lowerCAmelCase="gelu", lowerCAmelCase=0.0_2, lowerCAmelCase=1e-6, lowerCAmelCase=1e-2, lowerCAmelCase=0.0, lowerCAmelCase=0.0, **lowerCAmelCase, ): """simple docstring""" super().__init__(**lowerCAmelCase ) lowerCamelCase_ =image_size lowerCamelCase_ =num_channels lowerCamelCase_ =patch_sizes lowerCamelCase_ =strides lowerCamelCase_ =hidden_sizes lowerCamelCase_ =depths lowerCamelCase_ =mlp_ratios lowerCamelCase_ =hidden_act lowerCamelCase_ =initializer_range lowerCamelCase_ =layer_norm_eps lowerCamelCase_ =layer_scale_init_value lowerCamelCase_ =drop_path_rate lowerCamelCase_ =dropout_rate
676
'''Processor pairing a CLIP image processor with an XLM-Roberta tokenizer
(AltCLIP-style): a single __call__ handles text, images, or both.'''
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


# NOTE(review): identifiers are machine-mangled (`__UpperCamelCase`,
# `lowerCAmelCase` reused for every parameter — duplicate argument names do
# not compile) and assignment targets collapsed to `lowerCamelCase_`; code
# reproduced verbatim.
class __UpperCamelCase ( lowerCamelCase__ ):
    # Attributes persisted with the processor.
    lowercase : Optional[int] =['image_processor', 'tokenizer']
    # Expected image_processor / tokenizer classes.
    lowercase : str ='CLIPImageProcessor'
    lowercase : Optional[Any] =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
        """Accept (image_processor, tokenizer); fall back to the deprecated
        `feature_extractor` kwarg with a deprecation warning. Both components
        are mandatory."""
        lowerCamelCase_ =None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', lowerCAmelCase, )
            lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
        lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(lowerCAmelCase, lowerCAmelCase )

    def __call__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
        """Tokenize `text` and/or preprocess `images`; when both are given,
        attach pixel_values onto the text encoding before returning it."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )
        if images is not None:
            lowerCamelCase_ =self.image_processor(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )
        if text is not None and images is not None:
            # NOTE(review): target was presumably `encoding["pixel_values"]`
            # before mangling — `encoding` is otherwise unbound here.
            lowerCamelCase_ =image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**lowerCAmelCase ), tensor_type=lowerCAmelCase )

    def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )

    def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )

    @property
    def lowercase__ ( self ):
        """Union of tokenizer and image-processor input names, de-duplicated
        while preserving order."""
        lowerCamelCase_ =self.tokenizer.model_input_names
        lowerCamelCase_ =self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
676
1
'''simple docstring''' def a_ ( __snake_case : str , __snake_case : str ) -> bool: """simple docstring""" lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =[[False for _ in range(m + 1 )] for _ in range(n + 1 )] lowerCamelCase_ =True for i in range(__snake_case ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: lowerCamelCase_ =True if a[i].islower(): lowerCamelCase_ =True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
676
'''Image-to-text pipeline: generates a caption/text for an input image,
optionally conditioned on a text prompt.'''
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

a_ : Optional[Any] = logging.get_logger(__name__)


# NOTE(review): identifiers are machine-mangled (`__UpperCamelCase`,
# `lowerCAmelCase` reused for every parameter, assignment targets collapsed to
# `lowerCamelCase_`); the right-hand sides still read the original names
# (`prompt`, `model_inputs`, ...). Code reproduced verbatim.
@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
    def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """Require the vision backend and restrict the model to the
        vision-to-sequence mapping of the active framework."""
        super().__init__(*lowerCAmelCase, **lowerCAmelCase )
        requires_backends(self, '''vision''' )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )

    def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None ):
        """Split pipeline kwargs into (preprocess, forward, postprocess)
        parameter dicts; rejects `max_new_tokens` given both directly and via
        `generate_kwargs`."""
        lowerCamelCase_ ={}
        lowerCamelCase_ ={}
        if prompt is not None:
            lowerCamelCase_ =prompt
        if generate_kwargs is not None:
            lowerCamelCase_ =generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                lowerCamelCase_ ={}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
                    ''' please use only one''' )
            lowerCamelCase_ =max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__( self, lowerCAmelCase, **lowerCAmelCase ):
        """Caption one image or a list of images (delegates to Pipeline)."""
        return super().__call__(lowerCAmelCase, **lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
        """Preprocess: load the image and build model inputs, handling an
        optional text prompt per model family (git / pix2struct / generic;
        vision-encoder-decoder rejects prompts)."""
        lowerCamelCase_ =load_image(lowerCAmelCase )
        if prompt is not None:
            if not isinstance(lowerCAmelCase, lowerCAmelCase ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(lowerCAmelCase )} - but expected a single string. '''
                    '''Note also that one single text can be provided for conditional image to text generation.''' )
            lowerCamelCase_ =self.model.config.model_type
            if model_type == "git":
                # GIT expects the prompt token ids (with CLS prepended) as input_ids.
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
                lowerCamelCase_ =self.tokenizer(text=lowerCAmelCase, add_special_tokens=lowerCAmelCase ).input_ids
                lowerCamelCase_ =[self.tokenizer.cls_token_id] + input_ids
                lowerCamelCase_ =torch.tensor(lowerCAmelCase ).unsqueeze(0 )
                model_inputs.update({'''input_ids''': input_ids} )
            elif model_type == "pix2struct":
                # Pix2Struct renders the prompt into the image as header text.
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, header_text=lowerCAmelCase, return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
                lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework )
                model_inputs.update(lowerCAmelCase )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            lowerCamelCase_ =None
        return model_inputs

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
        """Forward: run `generate`, after dropping an all-None input_ids
        placeholder produced for unprompted GIT models."""
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['''input_ids'''], lowerCAmelCase )
            and all(x is None for x in model_inputs['''input_ids'''] )
        ):
            lowerCamelCase_ =None
        if generate_kwargs is None:
            lowerCamelCase_ ={}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        lowerCamelCase_ =model_inputs.pop(self.model.main_input_name )
        lowerCamelCase_ =self.model.generate(lowerCAmelCase, **lowerCAmelCase, **lowerCAmelCase )
        return model_outputs

    def lowercase__ ( self, lowerCAmelCase ):
        """Postprocess: decode every generated sequence into a
        {'generated_text': ...} record."""
        lowerCamelCase_ =[]
        for output_ids in model_outputs:
            lowerCamelCase_ ={
                '''generated_text''': self.tokenizer.decode(
                    lowerCAmelCase, skip_special_tokens=lowerCAmelCase, )
            }
            records.append(lowerCAmelCase )
        return records
676
1
'''SegFormer model configuration plus its ONNX export configuration.'''
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


a_ : Optional[Any] = logging.get_logger(__name__)

a_ : int = {
    """nvidia/segformer-b0-finetuned-ade-512-512""": (
        """https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


# NOTE(review): identifiers machine-mangled (duplicate `lowerCAmelCase`
# parameters cannot compile; assignment targets collapsed to
# `lowerCamelCase_`); code reproduced verbatim.
class __UpperCamelCase ( lowerCamelCase__ ):
    # model_type identifier used by the Auto* classes.
    lowercase : str ='segformer'

    def __init__( self, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase=[2, 2, 2, 2], lowerCAmelCase=[8, 4, 2, 1], lowerCAmelCase=[32, 64, 160, 256], lowerCAmelCase=[7, 3, 3, 3], lowerCAmelCase=[4, 2, 2, 2], lowerCAmelCase=[1, 2, 5, 8], lowerCAmelCase=[4, 4, 4, 4], lowerCAmelCase="gelu", lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.1, lowerCAmelCase=0.0_2, lowerCAmelCase=0.1, lowerCAmelCase=1e-6, lowerCAmelCase=256, lowerCAmelCase=255, **lowerCAmelCase, ):
        """Store SegFormer encoder/decoder hyper-parameters; warns when the
        removed `reshape_last_stage=False` behaviour is requested."""
        super().__init__(**lowerCAmelCase )
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                '''Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'''
                ''' removed, as the behaviour will default to that of reshape_last_stage = True.''', lowerCAmelCase, )
        lowerCamelCase_ =num_channels
        lowerCamelCase_ =num_encoder_blocks
        lowerCamelCase_ =depths
        lowerCamelCase_ =sr_ratios
        lowerCamelCase_ =hidden_sizes
        lowerCamelCase_ =patch_sizes
        lowerCamelCase_ =strides
        lowerCamelCase_ =mlp_ratios
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =hidden_dropout_prob
        lowerCamelCase_ =attention_probs_dropout_prob
        lowerCamelCase_ =classifier_dropout_prob
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =drop_path_rate
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =decoder_hidden_size
        # Legacy flag read from kwargs for backward compatibility.
        lowerCamelCase_ =kwargs.get('''reshape_last_stage''', lowerCAmelCase )
        lowerCamelCase_ =semantic_loss_ignore_index


class __UpperCamelCase ( lowerCamelCase__ ):
    # ONNX export settings for SegFormer.
    lowercase : Optional[int] =version.parse('1.11' )

    @property
    def lowercase__ ( self ):
        """ONNX input axes: dynamic batch, NCHW pixel_values layout."""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def lowercase__ ( self ):
        """Numerical tolerance used when validating the exported model."""
        return 1e-4

    @property
    def lowercase__ ( self ):
        """Default ONNX opset."""
        return 12
676
'''simple docstring''' import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def a_ ( __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Any ) -> str: """simple docstring""" # Initialise PyTorch model lowerCamelCase_ =BertConfig.from_json_file(__snake_case ) print(F'''Building PyTorch model from configuration: {config}''' ) lowerCamelCase_ =BertForPreTraining(__snake_case ) # Load weights from tf checkpoint load_tf_weights_in_bert(__snake_case , __snake_case , __snake_case ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , __snake_case ) if __name__ == "__main__": a_ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--bert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained BERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) a_ : Optional[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
676
1
'''Unit tests for the ScoreSdeVe (score-based SDE, variance-exploding)
diffusion pipeline.'''
import unittest

import numpy as np
import torch

from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


# NOTE(review): identifiers machine-mangled (assignment targets collapsed to
# `lowerCamelCase_`, test/property names to `lowercase__`); the right-hand
# sides still read the intended names (`model`, `sde_ve`, `image`, ...).
# Code reproduced verbatim.
class __UpperCamelCase ( unittest.TestCase ):
    @property
    def lowercase__ ( self ):
        """Tiny deterministic UNet fixture (seeded so weights are stable)."""
        torch.manual_seed(0 )
        lowerCamelCase_ =UNetaDModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
        return model

    def lowercase__ ( self ):
        """Fast check: 2 inference steps; dict and tuple outputs must both
        match a hard-coded 3x3 corner slice."""
        lowerCamelCase_ =self.dummy_uncond_unet
        lowerCamelCase_ =ScoreSdeVeScheduler()
        lowerCamelCase_ =ScoreSdeVePipeline(unet=lowerCAmelCase, scheduler=lowerCAmelCase )
        sde_ve.to(lowerCAmelCase )
        sde_ve.set_progress_bar_config(disable=lowerCAmelCase )
        lowerCamelCase_ =torch.manual_seed(0 )
        lowerCamelCase_ =sde_ve(num_inference_steps=2, output_type='''numpy''', generator=lowerCAmelCase ).images
        # Same seed again so the tuple-returning call is comparable.
        lowerCamelCase_ =torch.manual_seed(0 )
        lowerCamelCase_ =sde_ve(num_inference_steps=2, output_type='''numpy''', generator=lowerCAmelCase, return_dict=lowerCAmelCase )[
            0
        ]
        lowerCamelCase_ =image[0, -3:, -3:, -1]
        lowerCamelCase_ =image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCamelCase_ =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2


@slow
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    def lowercase__ ( self ):
        """Slow end-to-end check: full pretrained ncsnpp-church-256 pipeline,
        10 steps, against a hard-coded corner slice."""
        lowerCamelCase_ ='''google/ncsnpp-church-256'''
        lowerCamelCase_ =UNetaDModel.from_pretrained(lowerCAmelCase )
        lowerCamelCase_ =ScoreSdeVeScheduler.from_pretrained(lowerCAmelCase )
        lowerCamelCase_ =ScoreSdeVePipeline(unet=lowerCAmelCase, scheduler=lowerCAmelCase )
        sde_ve.to(lowerCAmelCase )
        sde_ve.set_progress_bar_config(disable=lowerCAmelCase )
        lowerCamelCase_ =torch.manual_seed(0 )
        lowerCamelCase_ =sde_ve(num_inference_steps=10, output_type='''numpy''', generator=lowerCAmelCase ).images
        lowerCamelCase_ =image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        lowerCamelCase_ =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
676
'''AltCLIP configuration classes: text tower (XLM-R style), vision tower
(CLIP ViT style) and the combined AltCLIP config.'''
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a_ : Union[str, Any] = logging.get_logger(__name__)

a_ : Optional[int] = {
    """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


# NOTE(review): identifiers machine-mangled — all three classes are named
# `__UpperCamelCase`, every parameter is `lowerCAmelCase` (duplicate argument
# names do not compile) and assignment targets collapsed to `lowerCamelCase_`;
# code reproduced verbatim.
class __UpperCamelCase ( lowerCamelCase__ ):
    # model_type of the text tower.
    lowercase : Optional[Any] ='altclip_text_model'

    def __init__( self, lowerCAmelCase=250_002, lowerCAmelCase=1_024, lowerCAmelCase=24, lowerCAmelCase=16, lowerCAmelCase=4_096, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=514, lowerCAmelCase=1, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-05, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase="absolute", lowerCAmelCase=True, lowerCAmelCase=768, **lowerCAmelCase, ):
        """Text-tower hyper-parameters (XLM-RoBERTa-large-like defaults plus a
        `project_dim` output projection size)."""
        super().__init__(pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase )
        lowerCamelCase_ =vocab_size
        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =hidden_act
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =hidden_dropout_prob
        lowerCamelCase_ =attention_probs_dropout_prob
        lowerCamelCase_ =max_position_embeddings
        lowerCamelCase_ =type_vocab_size
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =initializer_factor
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =position_embedding_type
        lowerCamelCase_ =use_cache
        lowerCamelCase_ =project_dim


class __UpperCamelCase ( lowerCamelCase__ ):
    # model_type of the vision tower.
    lowercase : Dict ='altclip_vision_model'

    def __init__( self, lowerCAmelCase=768, lowerCAmelCase=3_072, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3, lowerCAmelCase=224, lowerCAmelCase=32, lowerCAmelCase="quick_gelu", lowerCAmelCase=1e-5, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1.0, **lowerCAmelCase, ):
        """Vision-tower hyper-parameters (CLIP-ViT-like defaults)."""
        super().__init__(**lowerCAmelCase )
        lowerCamelCase_ =hidden_size
        lowerCamelCase_ =intermediate_size
        lowerCamelCase_ =projection_dim
        lowerCamelCase_ =num_hidden_layers
        lowerCamelCase_ =num_attention_heads
        lowerCamelCase_ =num_channels
        lowerCamelCase_ =patch_size
        lowerCamelCase_ =image_size
        lowerCamelCase_ =initializer_range
        lowerCamelCase_ =initializer_factor
        lowerCamelCase_ =attention_dropout
        lowerCamelCase_ =layer_norm_eps
        lowerCamelCase_ =hidden_act

    @classmethod
    def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ):
        """Load the vision config from a pretrained name/path; when the
        target is a full `altclip` config, pull out its `vision_config`
        sub-dict and warn on model_type mismatches."""
        cls._set_token_in_kwargs(lowerCAmelCase )
        lowerCamelCase_, lowerCamelCase_ =cls.get_config_dict(lowerCAmelCase, **lowerCAmelCase )
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get('''model_type''' ) == "altclip":
            lowerCamelCase_ =config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(lowerCAmelCase, **lowerCAmelCase )


class __UpperCamelCase ( lowerCamelCase__ ):
    # model_type of the combined config.
    lowercase : Dict ='altclip'
    # is_composition flag — NOTE(review): annotation says str, value is bool.
    lowercase : str =True

    def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=768, lowerCAmelCase=2.6_5_9_2, **lowerCAmelCase ):
        """Combine text and vision configs; reconciles the legacy
        `text_config_dict`/`vision_config_dict` kwargs with the newer
        `text_config`/`vision_config` ones, warning on conflicting values."""
        lowerCamelCase_ =kwargs.pop('''text_config_dict''', lowerCAmelCase )
        lowerCamelCase_ =kwargs.pop('''vision_config_dict''', lowerCAmelCase )
        super().__init__(**lowerCAmelCase )
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in
        # most cases, but we don't want to break anything regarding `_config_dict` that existed before commit
        # `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                lowerCamelCase_ ={}
            # This is the complete result when using `text_config_dict`.
            lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        lowerCamelCase_ =(
                            f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
                            f'''The value `text_config_dict["{key}"]` will be used instead.''' )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        lowerCamelCase_ =(
                            f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
                            f'''value `text_config["{key}"]` will be overriden.''' )
                    logger.warning(lowerCAmelCase )
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )
        if vision_config_dict is not None:
            if vision_config is None:
                lowerCamelCase_ ={}
            # This is the complete result when using `vision_config_dict`.
            lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                lowerCamelCase_ ={
                    str(lowerCAmelCase ): value for key, value in _vision_config_dict['''id2label'''].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        lowerCamelCase_ =(
                            f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
                            f'''values. The value `vision_config_dict["{key}"]` will be used instead.''' )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        lowerCamelCase_ =(
                            f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
                            f'''The value `vision_config["{key}"]` will be overriden.''' )
                    logger.warning(lowerCAmelCase )
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )
        if text_config is None:
            lowerCamelCase_ ={}
            logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
        if vision_config is None:
            lowerCamelCase_ ={}
            logger.info('''`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.''' )
        lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase )
        lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase )
        lowerCamelCase_ =projection_dim
        lowerCamelCase_ =logit_scale_init_value
        lowerCamelCase_ =1.0

    @classmethod
    def lowercase__ ( cls, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ):
        """Build a combined config from separate text and vision configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **lowerCAmelCase )

    def lowercase__ ( self ):
        """Serialize to a plain dict (deep copy, nested configs expanded,
        model_type included)."""
        lowerCamelCase_ =copy.deepcopy(self.__dict__ )
        lowerCamelCase_ =self.text_config.to_dict()
        lowerCamelCase_ =self.vision_config.to_dict()
        lowerCamelCase_ =self.__class__.model_type
        return output
1
'''Lempel-Ziv-style bit-string decompression utilities (file-to-file CLI).'''
import math
import sys


# NOTE(review): throughout this module every assignment target was mangled to
# `lowerCamelCase_` and several argument names to `__snake_case`, while
# right-hand sides still read the intended names (`curr_string`, `lexicon`,
# `result`, ...). Code reproduced verbatim (indentation reconstructed
# best-effort); the original bindings must be restored before it can run.
# Also note `math.loga` — presumably `math.log2` before mangling.
def a_ ( __snake_case : str ) -> str:
    """Read a binary file and return its contents as a string of '0'/'1'
    characters (8 bits per byte); exits the process if the file cannot be
    opened."""
    lowerCamelCase_ =''''''
    try:
        with open(__snake_case , '''rb''' ) as binary_file:
            lowerCamelCase_ =binary_file.read()
        for dat in data:
            lowerCamelCase_ =F'''{dat:08b}'''
            result += curr_byte
        return result
    except OSError:
        print('''File not accessible''' )
        sys.exit()


def a_ ( __snake_case : str ) -> str:
    """Decompress a bit string using a growing prefix lexicon; the lexicon is
    rebuilt with a '0' prefix whenever its index crosses a power of two."""
    lowerCamelCase_ ={'''0''': '''0''', '''1''': '''1'''}
    lowerCamelCase_, lowerCamelCase_ ='''''', ''''''
    lowerCamelCase_ =len(__snake_case )
    for i in range(len(__snake_case ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        lowerCamelCase_ =lexicon[curr_string]
        result += last_match_id
        lowerCamelCase_ =last_match_id + '''0'''
        if math.loga(__snake_case ).is_integer():
            lowerCamelCase_ ={}
            for curr_key in list(__snake_case ):
                lowerCamelCase_ =lexicon.pop(__snake_case )
            lowerCamelCase_ =new_lex
        lowerCamelCase_ =last_match_id + '''1'''
        index += 1
        lowerCamelCase_ =''''''
    return result


def a_ ( __snake_case : str , __snake_case : str ) -> None:
    """Pack the bit string into 8-bit bytes (padding the final byte with a
    1-then-zeros marker) and write them to a file; exits on I/O failure.

    NOTE(review): the write loop iterates `result_byte_array[:-1]`, skipping
    the final padded byte — verify against the matching compressor.
    """
    lowerCamelCase_ =8
    try:
        with open(__snake_case , '''wb''' ) as opened_file:
            lowerCamelCase_ =[
                to_write[i : i + byte_length] for i in range(0 , len(__snake_case ) , __snake_case )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('''10000000''' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1 )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(__snake_case , 2 ).to_bytes(1 , byteorder='''big''' ) )
    except OSError:
        print('''File not accessible''' )
        sys.exit()


def a_ ( __snake_case : str ) -> str:
    """Strip the leading length-encoding prefix from the bit string: skip the
    run of '0's up to the first '1', then slice twice more (exact intended
    composition obscured by the mangled bindings — verify upstream)."""
    lowerCamelCase_ =0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    lowerCamelCase_ =data_bits[counter:]
    lowerCamelCase_ =data_bits[counter + 1 :]
    return data_bits


def a_ ( __snake_case : str , __snake_case : str ) -> None:
    """CLI entry point: read the source file's bits, strip the prefix,
    decompress, and write the result to the destination path."""
    lowerCamelCase_ =read_file_binary(__snake_case )
    lowerCamelCase_ =remove_prefix(__snake_case )
    lowerCamelCase_ =decompress_data(__snake_case )
    write_file_binary(__snake_case , __snake_case )


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
676
# ---------------------------------------------------------------------------
# NOTE(review): this region is a machine-mangled copy of the Hugging Face
# Flaubert PyTorch test-suite (test_modeling_flaubert.py).  The mangler
# renamed every class to ``__UpperCamelCase``, every method to ``lowercase__``
# and rewrote every assignment TARGET to the single local ``lowerCamelCase_``,
# so chains like ``lowerCamelCase_ =parent lowerCamelCase_ =batch_size`` were
# originally distinct attribute writes (``self.parent``, ``self.batch_size``,
# ...).  Statements are also collapsed onto a few physical lines, which is not
# valid Python.  Code is left byte-identical; restore from upstream before use.
# ---------------------------------------------------------------------------
# Imports, torch-gated model imports, and the start of the model-tester class:
# ``__init__`` receives every config knob (batch size, seq length, flags,
# dims, dropout, ...) -- presumably stored on ``self`` in the original.
'''simple docstring''' import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, lowerCAmelCase, lowerCAmelCase=13, lowerCAmelCase=7, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=False, lowerCAmelCase=2, lowerCAmelCase=99, lowerCAmelCase=0, lowerCAmelCase=32, lowerCAmelCase=5, lowerCAmelCase=4, lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=2, lowerCAmelCase=0.0_2, lowerCAmelCase=3, lowerCAmelCase=4, lowerCAmelCase="last", lowerCAmelCase=None, lowerCAmelCase=None, ): """simple docstring""" lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =seq_length lowerCamelCase_ =is_training lowerCamelCase_ =use_input_lengths lowerCamelCase_ =use_token_type_ids lowerCamelCase_ =use_labels lowerCamelCase_ =gelu_activation lowerCamelCase_ =sinusoidal_embeddings lowerCamelCase_ =causal lowerCamelCase_ =asm lowerCamelCase_ =n_langs lowerCamelCase_ =vocab_size lowerCamelCase_ =n_special lowerCamelCase_ =hidden_size lowerCamelCase_ =num_hidden_layers lowerCamelCase_ =num_attention_heads lowerCamelCase_ =hidden_dropout_prob lowerCamelCase_
# Remainder of ``__init__``; ``prepare_config_and_inputs`` builds random ids,
# masks, optional lengths/token-type ids and labels; ``get_config`` returns a
# FlaubertConfig assembled from the stored knobs.
=attention_probs_dropout_prob lowerCamelCase_ =max_position_embeddings lowerCamelCase_ =type_vocab_size lowerCamelCase_ =type_sequence_label_size lowerCamelCase_ =initializer_range lowerCamelCase_ =num_labels lowerCamelCase_ =num_choices lowerCamelCase_ =summary_type lowerCamelCase_ =use_proj lowerCamelCase_ =scope def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) lowerCamelCase_ =random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ =None if self.use_input_lengths: lowerCamelCase_ =( ids_tensor([self.batch_size], vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCamelCase_ =None if self.use_token_type_ids: lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.n_langs ) lowerCamelCase_ =None lowerCamelCase_ =None lowerCamelCase_ =None if self.use_labels: lowerCamelCase_ =ids_tensor([self.batch_size], self.type_sequence_label_size ) lowerCamelCase_ =ids_tensor([self.batch_size, self.seq_length], self.num_labels ) lowerCamelCase_ =ids_tensor([self.batch_size], 2 ).float() lowerCamelCase_ =ids_tensor([self.batch_size], self.num_choices ) lowerCamelCase_ =self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowercase__ ( self ): """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, ) def lowercase__ ( self,
# ``create_and_check_*`` helpers: base model (checks last_hidden_state shape),
# LM head (loss + logits shapes), simple QA (start/end logits), and the start
# of the full QA head check.
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertModel(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, lengths=lowerCAmelCase, langs=lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, langs=lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertWithLMHeadModel(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForQuestionAnsweringSimple(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForQuestionAnswering(lowerCAmelCase ) model.to(lowerCAmelCase )
# Full QA check (cls_index / is_impossible / p_mask variants, beam-search
# top-k shape asserts), then sequence-classification check, then the start of
# the token-classification check.
model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model( lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, p_mask=lowerCAmelCase, ) lowerCamelCase_ =model( lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase, cls_index=lowerCAmelCase, is_impossible=lowerCAmelCase, ) ((lowerCamelCase_), ) =result_with_labels.to_tuple() lowerCamelCase_ =model(lowerCAmelCase, start_positions=lowerCAmelCase, end_positions=lowerCAmelCase ) ((lowerCamelCase_), ) =result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, () ) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =FlaubertForSequenceClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase ) lowerCamelCase_ =model(lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.loss.shape, () ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =self.num_labels lowerCamelCase_
# Token-classification and multiple-choice checks (inputs expanded across the
# choice dimension), ``prepare_config_and_inputs_for_common``, and the start
# of the ``@require_torch`` ModelTesterMixin test class with its
# model / pipeline-task mappings.
=FlaubertForTokenClassification(lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =model(lowerCAmelCase, attention_mask=lowerCAmelCase, labels=lowerCAmelCase ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ): """simple docstring""" lowerCamelCase_ =self.num_choices lowerCamelCase_ =FlaubertForMultipleChoice(config=lowerCAmelCase ) model.to(lowerCAmelCase ) model.eval() lowerCamelCase_ =input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() lowerCamelCase_ =model( lowerCAmelCase, attention_mask=lowerCAmelCase, token_type_ids=lowerCAmelCase, labels=lowerCAmelCase, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.prepare_config_and_inputs() ( ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ( lowerCamelCase_ ), ) =config_and_inputs lowerCamelCase_ ={ '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : List[Any] =( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) lowercase : Tuple =( { 'feature-extraction':
# Pipeline mapping continued; a hook skipping QAPipelineTests on slow
# tokenizers; ``_prepare_for_class`` injecting zero start/end labels for the
# QA head; ``setUp`` wiring tester + ConfigTester; common config tests and
# the first per-head test methods.
FlaubertModel, 'fill-mask': FlaubertWithLMHeadModel, 'question-answering': FlaubertForQuestionAnsweringSimple, 'text-classification': FlaubertForSequenceClassification, 'token-classification': FlaubertForTokenClassification, 'zero-shot': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=False ): """simple docstring""" lowerCamelCase_ =super()._prepare_for_class(lowerCAmelCase, lowerCAmelCase, return_labels=lowerCAmelCase ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowerCamelCase_ =torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase ) lowerCamelCase_ =torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=lowerCAmelCase ) return inputs_dict def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =FlaubertModelTester(self ) lowerCamelCase_ =ConfigTester(self, config_class=lowerCAmelCase, emb_dim=37 ) def lowercase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase ) def lowercase__ ( self
# Remaining per-head test methods (simple QA, QA, sequence/token classif,
# multiple choice), a slow pretrained-load smoke test, and the start of the
# slow GPU torchscript test (FlaubertForMultipleChoice is excluded).
): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase ) @slow def lowercase__ ( self ): """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ =FlaubertModel.from_pretrained(lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) @slow @require_torch_gpu def lowercase__ ( self ): """simple docstring""" lowerCamelCase_, lowerCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
# Torchscript trace/save/reload round-trip, and a slow integration test
# comparing a hidden-state slice of flaubert_base_cased against hard-coded
# expected values (atol=1e-4).
if model_class == FlaubertForMultipleChoice: return lowerCamelCase_ =True lowerCamelCase_ =model_class(config=lowerCAmelCase ) lowerCamelCase_ =self._prepare_for_class(lowerCAmelCase, lowerCAmelCase ) lowerCamelCase_ =torch.jit.trace( lowerCAmelCase, (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCAmelCase, os.path.join(lowerCAmelCase, '''traced_model.pt''' ) ) lowerCamelCase_ =torch.jit.load(os.path.join(lowerCAmelCase, '''traced_model.pt''' ), map_location=lowerCAmelCase ) loaded(inputs_dict['''input_ids'''].to(lowerCAmelCase ), inputs_dict['''attention_mask'''].to(lowerCAmelCase ) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' ) lowerCamelCase_ =torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) with torch.no_grad(): lowerCamelCase_ =model(lowerCAmelCase )[0] lowerCamelCase_ =torch.Size((1, 11, 768) ) self.assertEqual(output.shape, lowerCAmelCase ) lowerCamelCase_ =torch.tensor( [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], lowerCAmelCase, atol=1e-4 ) )
676
1
"""Root finding by the bisection method.

NOTE(review): in the mangled source both functions were defined under the
name ``a_`` while their bodies and the ``__main__`` block called ``equation``
and ``bisection`` -- a guaranteed NameError.  The descriptive names are
restored here; behavior is otherwise unchanged.
"""


def equation(x: float) -> float:
    """Function whose root we search for: f(x) = 10 - x**2."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Locate a root of ``equation`` inside ``[a, b]`` by bisection.

    The interval must bracket a sign change (Bolzano's theorem); it is
    repeatedly halved until narrower than 0.01, so the returned value is
    within 0.01 of a true root.

    :raises ValueError: if ``equation(a)`` and ``equation(b)`` do not have
        strictly opposite signs.
    """
    # Bolzano: a sign change on [a, b] guarantees a root inside.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Midpoint of the current bracket.
        c = (a + b) / 2
        # Exact root found -- stop early.
        if equation(c) == 0.0:
            break
        # Keep the half-interval where the sign change (hence the root) lives.
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
676
# ---------------------------------------------------------------------------
# NOTE(review): machine-mangled copy of transformers'
# modeling_flax_pytorch_utils.py (PyTorch <-> Flax checkpoint conversion).
# All top-level functions were renamed to ``a_`` while call sites kept the
# descriptive names (convert_pytorch_state_dict_to_flax, ...), and assignment
# targets were collapsed onto the single local ``lowerCamelCase_`` -- e.g.
# writes into ``flax_state_dict[...]`` lost their subscripts, and statements
# are fused onto a few physical lines (not valid Python).  Code left
# byte-identical; restore from the upstream file before relying on it.
# ---------------------------------------------------------------------------
# Entry point loading a PyTorch checkpoint (plain or sharded) into a Flax
# state dict, plus the start of the key-rename helper (layer-norm "scale",
# batch-norm running mean cases).
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging a_ : List[Any] = logging.get_logger(__name__) def a_ ( __snake_case : Optional[int] , __snake_case : str , __snake_case : List[Any] , __snake_case : int=False ) -> List[str]: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: lowerCamelCase_ =os.path.abspath(__snake_case ) logger.info(F'''Loading PyTorch weights from {pt_path}''' ) lowerCamelCase_ =torch.load(__snake_case , map_location='''cpu''' ) logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' ) lowerCamelCase_ =convert_pytorch_state_dict_to_flax(__snake_case , __snake_case ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files lowerCamelCase_ =convert_pytorch_sharded_state_dict_to_flax(__snake_case , __snake_case ) return flax_state_dict def a_ ( __snake_case : Tuple[str] , __snake_case : np.ndarray , __snake_case : Dict[str, jnp.ndarray] , __snake_case : str , ) -> (Tuple[str], np.ndarray): """simple docstring""" def is_key_or_prefix_key_in_dict(__snake_case : Tuple[str] ) -> bool: return len(set(__snake_case ) & {key, (model_prefix,) + key} ) > 0 # layer norm lowerCamelCase_ =pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean lowerCamelCase_ =pt_tuple_key[:-1] + ('''mean''',) if
# Rest of the rename helper: batch-norm var, embedding, conv (kernel
# transposed 2,3,1,0), linear (kernel transposed), legacy gamma/beta, and the
# PR #24030 weight_norm original0/original1 -> *_g/*_v renames; then the
# start of convert_pytorch_state_dict_to_flax.
pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var lowerCamelCase_ =pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # embedding lowerCamelCase_ =pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__snake_case ): return renamed_pt_tuple_key, pt_tensor # conv layer lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__snake_case ): lowerCamelCase_ =pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCamelCase_ =pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__snake_case ): lowerCamelCase_ =pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCamelCase_ =pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCamelCase_ =pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 lowerCamelCase_ =None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): lowerCamelCase_ =pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): lowerCamelCase_ =pt_tuple_key[-2] + '''_v''' if name is not None: lowerCamelCase_ =pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def a_ ( __snake_case : Union[str, Any] , __snake_case : str ) -> str: """simple docstring""" # convert pytorch tensor to numpy lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase_
# Main conversion loop: flatten Flax params (plus batch_stats when present),
# detect head<->base prefix mismatches, strip/add the model prefix, and
# rename/reshape each PyTorch tensor to its Flax slot.
=flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: lowerCamelCase_ =flax_model.params['''params'''] else: lowerCamelCase_ =flax_model.params lowerCamelCase_ =flatten_dict(__snake_case ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase_ =flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(__snake_case ) lowerCamelCase_ ={} lowerCamelCase_ =(model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowerCamelCase_ =(model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase_ =tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowerCamelCase_ =pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ =pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor( __snake_case , __snake_case , __snake_case , __snake_case ) # add model prefix if necessary lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect.
# Shape-mismatch error, batch-stats bookkeeping (mean/var routed, the
# num_batches_tracked key dropped), and the start of the sharded-checkpoint
# variant, which repeats the same pipeline per shard file.
Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__snake_case , __snake_case ) continue # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) else: # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) return unflatten_dict(__snake_case ) def a_ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Optional[Any]: """simple docstring""" import torch # Load the index lowerCamelCase_ ={} for shard_file in shard_filenames: # load using msgpack utils lowerCamelCase_ =torch.load(__snake_case ) lowerCamelCase_ ={k: v.numpy() for k, v in pt_state_dict.items()} lowerCamelCase_ =flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: lowerCamelCase_ =flax_model.params['''params'''] lowerCamelCase_ =flatten_dict(__snake_case ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: lowerCamelCase_ =flax_model.params lowerCamelCase_ =flatten_dict(__snake_case ) lowerCamelCase_ =(model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) lowerCamelCase_ =(model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCamelCase_ =tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary lowerCamelCase_ =pt_tuple_key[0] == model_prefix if
# Continuation of the sharded loop (same rename / prefix / shape-check /
# batch-stats logic as the unsharded path).
load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_ =pt_tuple_key[1:] # Correctly rename weight parameters lowerCamelCase_, lowerCamelCase_ =rename_key_and_reshape_tensor( __snake_case , __snake_case , __snake_case , __snake_case ) # add model prefix if necessary lowerCamelCase_ =(model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue if "var" in flax_key[-1]: lowerCamelCase_ =jnp.asarray(__snake_case ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__snake_case , __snake_case ) continue # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) else: # also add unexpected weight so that warning is thrown lowerCamelCase_ =jnp.asarray(__snake_case ) return unflatten_dict(__snake_case ) def a_ ( __snake_case : List[str] , __snake_case : Dict ) -> str: """simple docstring""" lowerCamelCase_ =os.path.abspath(__snake_case ) logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' ) # import correct flax class lowerCamelCase_ =getattr(__snake_case , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(__snake_case , '''rb''' ) as state_f: try: lowerCamelCase_ =from_bytes(__snake_case , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object.
# Reverse direction: load_flax_checkpoint_in_pytorch_model (deserializes the
# msgpack file) and the start of load_flax_weights_in_pytorch_model, which
# downcasts bf16 -> fp32 and walks the flattened Flax state.
''' ) return load_flax_weights_in_pytorch_model(__snake_case , __snake_case ) def a_ ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] ) -> Optional[int]: """simple docstring""" try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights lowerCamelCase_ =flatten_dict(jax.tree_util.tree_map(lambda __snake_case : x.dtype == jnp.bfloataa , __snake_case ) ).values() if any(__snake_case ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) lowerCamelCase_ =jax.tree_util.tree_map( lambda __snake_case : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __snake_case ) lowerCamelCase_ =flatten_dict(__snake_case ) lowerCamelCase_ =pt_model.state_dict() lowerCamelCase_ =(pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) lowerCamelCase_ =(pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys lowerCamelCase_ =[] lowerCamelCase_ =set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): lowerCamelCase_ =flax_key_tuple[0] == pt_model.base_model_prefix lowerCamelCase_ ='''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: lowerCamelCase_
# Flax -> PyTorch key renaming: kernel transposes, scale/embedding -> weight,
# batch-stats -> running_mean / running_var, batch_stats header stripped.
=flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: lowerCamelCase_ =(pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__snake_case ) not in pt_model_dict: # conv layer lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) lowerCamelCase_ =jnp.transpose(__snake_case , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__snake_case ) not in pt_model_dict: # linear layer lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) lowerCamelCase_ =flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: lowerCamelCase_ =flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: lowerCamelCase_ ='''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: lowerCamelCase_ ='''.'''.join(__snake_case ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
# weight_norm special-name table for pt_model_dict, per-key shape check,
# tensor copy into the PyTorch dict, missing/unexpected-key bookkeeping and
# the final load_state_dict call + warnings.
lowerCamelCase_ ={} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: lowerCamelCase_ =key.split('''.''' ) lowerCamelCase_ =None if key_components[-3::2] == ["parametrizations", "original0"]: lowerCamelCase_ =key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: lowerCamelCase_ =key_components[-2] + '''_v''' if name is not None: lowerCamelCase_ =key_components[:-3] + [name] lowerCamelCase_ ='''.'''.join(__snake_case ) lowerCamelCase_ =key if flax_key in special_pt_names: lowerCamelCase_ =special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected ''' F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict lowerCamelCase_ =np.asarray(__snake_case ) if not isinstance(__snake_case , np.ndarray ) else flax_tensor lowerCamelCase_ =torch.from_numpy(__snake_case ) # remove from missing keys missing_keys.remove(__snake_case ) else: # weight is not expected by PyTorch model unexpected_keys.append(__snake_case ) pt_model.load_state_dict(__snake_case ) # re-transform missing_keys to list lowerCamelCase_ =list(__snake_case ) if len(__snake_case ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' ''' to be exactly identical (e.g.
# Closing warnings about unexpected / newly-initialized weights; returns the
# populated PyTorch model.
initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' ) if len(__snake_case ) > 0: logger.warning( F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' ''' use it for predictions and inference.''' ) else: logger.warning( F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n''' '''If your task is similar to the task the model of the checkpoint was trained on, ''' F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' ) return pt_model
676
1
# ---------------------------------------------------------------------------
# NOTE(review): machine-mangled copy of transformers'
# models/layoutlmv2/__init__.py lazy-import module.  The per-key
# ``_import_structure[...] = [...]`` additions were collapsed into repeated
# ``a_ = [...]`` bindings (so only the first dict literal actually feeds
# ``_LazyModule``), and the TYPE_CHECKING imports reference "...layoutlmva"
# modules/classes that do not match the "LayoutLMv2..." string keys above.
# Left byte-identical; restore from the upstream ``__init__`` before use.
# ---------------------------------------------------------------------------
# Import-structure dict, availability-gated tokenizer/vision/torch entries,
# and the start of the TYPE_CHECKING branch.
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) a_ : Optional[int] = { """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Any = ["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[Any] = ["""LayoutLMv2FeatureExtractor"""] a_ : int = ["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Dict = [ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from
# Torch-gated modeling imports for type checkers; at runtime the module is
# replaced by a ``_LazyModule`` built from ``_import_structure``.
.modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys a_ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
676
'''simple docstring''' def a_ ( __snake_case : str , __snake_case : str ) -> str: """simple docstring""" lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =( first_str_length if first_str_length > second_str_length else second_str_length ) lowerCamelCase_ =[] for char_count in range(__snake_case ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(__snake_case ) if __name__ == "__main__": print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
676
1
"""Unit tests for the Vector/Matrix linear-algebra helpers in ``.lib``."""
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class __UpperCamelCase(unittest.TestCase):
    """Tests for Vector and Matrix.

    Fixes: every method previously shared one name (so only the last survived
    and unittest discovered none of them), and locals were assigned to a name
    different from the one read (NameError). Methods now use the ``test_``
    prefix required by unittest discovery.
    """

    def test_component(self):
        # component() returns entries; an empty Vector must be constructible.
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_multiply(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        # Fixed: a stray third argument (0.01) was silently consumed as ``msg``.
        self.assertEqual(7, a.component(2, 1))

    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
676
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ : Any = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[Any] = ["""TimmBackbone"""] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys a_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
676
1
"""Configuration for RWKV models.

Fixes: ``__init__`` declared every parameter with the same name (a
SyntaxError), the base class name was undefined, and ``model_type`` /
``attribute_map`` were both assigned to a single shadowed name.
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

a_ = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class __UpperCamelCase(PretrainedConfig):
    """RWKV model configuration (stores hyperparameters for an RWKV model)."""

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        """Build the config; ``None`` sizes default from ``hidden_size``."""
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Attention width defaults to the hidden width when unspecified.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        # Feed-forward width defaults to 4x hidden, the usual transformer ratio.
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
676
'''simple docstring''' import functools def a_ ( __snake_case : str , __snake_case : str ) -> int: """simple docstring""" lowerCamelCase_ =len(__snake_case ) lowerCamelCase_ =len(__snake_case ) @functools.cache def min_distance(__snake_case : int , __snake_case : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa lowerCamelCase_ =int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , __snake_case ) , 1 + min_distance(__snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
676
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor a_ : Dict = logging.get_logger(__name__) class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" warnings.warn( '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use ChineseCLIPImageProcessor instead.''', lowerCAmelCase, ) super().__init__(*lowerCAmelCase, **lowerCAmelCase )
676
'''simple docstring''' def a_ ( __snake_case : int ) -> bool: """simple docstring""" if not isinstance(__snake_case , __snake_case ): lowerCamelCase_ =F'''Input value of [number={number}] must be an integer''' raise TypeError(__snake_case ) if number < 0: return False lowerCamelCase_ =number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
676
1
'''simple docstring''' import string def a_ ( __snake_case : str ) -> None: """simple docstring""" for key in range(len(string.ascii_uppercase ) ): lowerCamelCase_ ='''''' for symbol in message: if symbol in string.ascii_uppercase: lowerCamelCase_ =string.ascii_uppercase.find(__snake_case ) lowerCamelCase_ =num - key if num < 0: lowerCamelCase_ =num + len(string.ascii_uppercase ) lowerCamelCase_ =translated + string.ascii_uppercase[num] else: lowerCamelCase_ =translated + symbol print(F'''Decryption using Key #{key}: {translated}''' ) def a_ ( ) -> None: """simple docstring""" lowerCamelCase_ =input('''Encrypted message: ''' ) lowerCamelCase_ =message.upper() decrypt(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod() main()
676
'''simple docstring''' from __future__ import annotations a_ : int = list[list[int]] # assigning initial values to the grid a_ : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution a_ : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def a_ ( __snake_case : Matrix , __snake_case : int , __snake_case : int , __snake_case : int ) -> bool: """simple docstring""" for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def a_ ( __snake_case : Matrix ) -> tuple[int, int] | None: """simple docstring""" for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def a_ ( __snake_case : Matrix ) -> Matrix | None: """simple docstring""" if location := find_empty_location(__snake_case ): lowerCamelCase_, lowerCamelCase_ =location else: # If the location is ``None``, then the grid is solved. 
return grid for digit in range(1 , 10 ): if is_safe(__snake_case , __snake_case , __snake_case , __snake_case ): lowerCamelCase_ =digit if sudoku(__snake_case ) is not None: return grid lowerCamelCase_ =0 return None def a_ ( __snake_case : Matrix ) -> None: """simple docstring""" for row in grid: for cell in row: print(__snake_case , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("""\nExample grid:\n""" + """=""" * 20) print_solution(example_grid) print("""\nExample grid solution:""") a_ : Union[str, Any] = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("""Cannot find a solution.""")
676
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a_ : int = { """configuration_vision_text_dual_encoder""": ["""VisionTextDualEncoderConfig"""], """processing_vision_text_dual_encoder""": ["""VisionTextDualEncoderProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : int = ["""VisionTextDualEncoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Dict = ["""FlaxVisionTextDualEncoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : int = ["""TFVisionTextDualEncoderModel"""] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys a_ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
676
"""Configuration for Informer time-series transformer models.

Fixes: ``__init__`` declared every parameter with one shared name (a
SyntaxError), the base class name was undefined, the archive map's ``Tuple``
annotation referenced an unimported name, and ``model_type`` /
``attribute_map`` were collapsed onto a single shadowed attribute.
"""
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging

a_ = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class __UpperCamelCase(PretrainedConfig):
    """Informer model configuration."""

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: Optional[List[int]] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # Time-series-specific configuration.
        self.prediction_length = prediction_length
        # Context window defaults to the prediction horizon when unspecified.
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer-specific attention settings.
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Number of extra per-timestep features fed alongside the lagged values."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
676
1
"""Helpers for dataset naming: case conversion and split filename building.

Fixes: module regexes were assigned to a throwaway name while being read as
``_uppercase_uppercase_re`` etc. (NameError), all functions shared one name
while calling each other by their real names, module-level annotations
referenced unimported ``typing`` names, and the split-name error message
contained a doubled quote.
"""
# Lint as: python3
import itertools
import os
import re

_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

# NOTE(review): unused in this file; presumably for path sanitization elsewhere.
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert CamelCase to snake_case, e.g. ``SomeDataset`` -> ``some_dataset``."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake_case to CamelCase, e.g. ``some_dataset`` -> ``SomeDataset``."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    """Return the snake_case prefix for a dataset ``name`` (must not be a path)."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    """Return ``<name_prefix>-<split>``, validating both components."""
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Return a glob pattern matching all shard files of a dataset split."""
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Return the list of shard filenames for a dataset split.

    With ``shard_lengths`` the names carry ``-SSSSS-of-NNNNN`` shard markers;
    otherwise a single filename is returned.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
676
'''simple docstring''' from __future__ import annotations def a_ ( __snake_case : int ) -> list[int]: """simple docstring""" lowerCamelCase_ =[True] * limit lowerCamelCase_ =False lowerCamelCase_ =False lowerCamelCase_ =True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): lowerCamelCase_ =i * 2 while index < limit: lowerCamelCase_ =False lowerCamelCase_ =index + i lowerCamelCase_ =[2] for i in range(3 , __snake_case , 2 ): if is_prime[i]: primes.append(__snake_case ) return primes def a_ ( __snake_case : int = 100_0000 ) -> int: """simple docstring""" lowerCamelCase_ =prime_sieve(__snake_case ) lowerCamelCase_ =0 lowerCamelCase_ =0 for i in range(len(__snake_case ) ): for j in range(i + length , len(__snake_case ) ): lowerCamelCase_ =sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: lowerCamelCase_ =j - i lowerCamelCase_ =sol return largest if __name__ == "__main__": print(F"""{solution() = }""")
676
1
"""ConvNeXt-style image processor (shortest-edge resize with crop-pct logic).

Fixes: every method declared all parameters with one shared name (a
SyntaxError), all methods shared the name ``lowercase__``, the class-level
``Tuple`` annotation referenced an unimported name, and the
``do_resize``/``resample`` validation had an and/or precedence bug.
"""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL

a_ = logging.get_logger(__name__)


class __UpperCamelCase(BaseImageProcessor):
    """Image processor: resize (with crop-pct below 384px), rescale, normalize."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image; below 384px the shortest edge is upscaled by
        1/crop_pct and then center-cropped, preserving aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the configured resize/rescale/normalize pipeline to ``images``.

        Per-call arguments override the instance defaults.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Fixed precedence: previously ``do_resize and size is None or resample is None``
        # raised even when resizing was disabled but resample was None.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
676
"""Unconditional image-generation pipeline using DDIM sampling.

Fixes: ``__init__`` declared both parameters with one shared name (a
SyntaxError), the base class name was undefined, and both ``isinstance``
checks passed an undefined name as the type argument.
"""
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class __UpperCamelCase(DiffusionPipeline):
    """DDIM denoising loop over a UNet, producing images from pure noise."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run ``num_inference_steps`` DDIM denoising steps and return images."""
        # Sample gaussian noise to begin the loop.
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        # Map from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
676
1
'''simple docstring'''
# NOTE(review): this file is machine-obfuscated. Every local assignment target was
# collapsed to `lowerCamelCase_` and most argument names to `__snake_case`, so the
# code below is NOT runnable as-is: multi-parameter `def`s repeat `__snake_case`
# (a SyntaxError), and bodies read names (`init_path`, `pretrained_model_name_or_path`,
# `module_file`, ...) that no assignment binds. The structure matches a
# "load community pipeline code dynamically from GitHub / the Hub" utility module.
# Comments below describe what each piece visibly does; identifications of original
# names are hedged where they rest on call sites rather than definitions.
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging

# URL template for fetching a community pipeline file from the diffusers GitHub repo.
# NOTE(review): the body below refers to this constant as COMMUNITY_PIPELINES_URL;
# the obfuscation renamed the binding to `a_`.
a_ : Dict = (
    """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
a_ : Dict = logging.get_logger(__name__)  # pylint: disable=invalid-name


def a_ ( ) -> Dict:
    """simple docstring"""
    # Fetch the list of published diffusers releases from PyPI and return them
    # sorted as version numbers. Call sites below refer to this as
    # `get_diffusers_versions` — TODO confirm original name.
    lowerCamelCase_ ='''https://pypi.org/pypi/diffusers/json'''
    lowerCamelCase_ =json.loads(request.urlopen(__snake_case ).read() )['''releases'''].keys()
    return sorted(__snake_case , key=lambda __snake_case : version.Version(__snake_case ) )


def a_ ( ) -> Optional[int]:
    """simple docstring"""
    # Create HF_MODULES_CACHE (with an empty __init__.py) and put it on sys.path
    # so dynamically-downloaded modules become importable. Referred to below as
    # `init_hf_modules`.
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(__snake_case )
    os.makedirs(__snake_case , exist_ok=__snake_case )
    lowerCamelCase_ =Path(__snake_case ) / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()


def a_ ( __snake_case : Union[str, os.PathLike] ) -> List[str]:
    """simple docstring"""
    # Recursively create a package directory (with __init__.py) under the dynamic
    # modules cache. Referred to below as `create_dynamic_module`.
    # NOTE(review): return annotation List[str] but `List` is not imported above.
    init_hf_modules()
    lowerCamelCase_ =Path(__snake_case ) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent )
    os.makedirs(__snake_case , exist_ok=__snake_case )
    lowerCamelCase_ =dynamic_module_path / '''__init__.py'''
    if not init_path.exists():
        init_path.touch()


def a_ ( __snake_case : List[Any] ) -> Optional[int]:
    """simple docstring"""
    # Scan one module file and return the deduplicated names it relatively imports.
    # Referred to below as `get_relative_imports`.
    # NOTE(review): the regex patterns are not raw strings, so `\s`/`\S` rely on
    # Python tolerating unknown escapes — should be r'...' literals.
    with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f:
        lowerCamelCase_ =f.read()
    # Imports of the form `import .xxx`
    lowerCamelCase_ =re.findall('''^\s*import\s+\.(\S+)\s*$''' , __snake_case , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __snake_case , flags=re.MULTILINE )
    # Unique-ify
    return list(set(__snake_case ) )


def a_ ( __snake_case : Union[str, Any] ) -> List[str]:
    """simple docstring"""
    # Transitively collect all relative imports reachable from `module_file` by
    # repeatedly scanning newly discovered files until a fixpoint (`no_change`).
    lowerCamelCase_ =False
    lowerCamelCase_ =[module_file]
    lowerCamelCase_ =[]
    # Let's recurse through all relative imports
    while not no_change:
        lowerCamelCase_ =[]
        for f in files_to_check:
            new_imports.extend(get_relative_imports(__snake_case ) )
        lowerCamelCase_ =Path(__snake_case ).parent
        lowerCamelCase_ =[str(module_path / m ) for m in new_imports]
        lowerCamelCase_ =[f for f in new_import_files if f not in all_relative_imports]
        lowerCamelCase_ =[F'''{f}.py''' for f in new_import_files]
        lowerCamelCase_ =len(__snake_case ) == 0
        all_relative_imports.extend(__snake_case )
    return all_relative_imports


def a_ ( __snake_case : Any ) -> str:
    """simple docstring"""
    # Check that every top-level (absolute) module imported by the file is
    # importable in the current environment; raise ImportError listing the
    # missing ones, then return the file's relative imports.
    # Referred to below as `check_imports`.
    with open(__snake_case , '''r''' , encoding='''utf-8''' ) as f:
        lowerCamelCase_ =f.read()
    # Imports of the form `import xxx`
    lowerCamelCase_ =re.findall('''^\s*import\s+(\S+)\s*$''' , __snake_case , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall('''^\s*from\s+(\S+)\s+import''' , __snake_case , flags=re.MULTILINE )
    # Only keep the top-level module
    lowerCamelCase_ =[imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )]
    # Unique-ify and test we got them all
    lowerCamelCase_ =list(set(__snake_case ) )
    lowerCamelCase_ =[]
    for imp in imports:
        try:
            importlib.import_module(__snake_case )
        except ImportError:
            missing_packages.append(__snake_case )
    if len(__snake_case ) > 0:
        raise ImportError(
            '''This modeling file requires the following packages that were not found in your environment: '''
            F'''{', '.join(__snake_case )}. Run `pip install {' '.join(__snake_case )}`''' )
    return get_relative_imports(__snake_case )


def a_ ( __snake_case : Optional[int] , __snake_case : int ) -> Tuple:
    """simple docstring"""
    # Import `module_path` (path separators turned into dots) and return either the
    # named class or, when class_name is None, the single pipeline class found in it.
    # NOTE(review): both parameters are named `__snake_case` — invalid Python; the
    # body expects (module_path, class_name).
    lowerCamelCase_ =module_path.replace(os.path.sep , '''.''' )
    lowerCamelCase_ =importlib.import_module(__snake_case )
    if class_name is None:
        return find_pipeline_class(__snake_case )
    return getattr(__snake_case , __snake_case )


def a_ ( __snake_case : List[Any] ) -> Any:
    """simple docstring"""
    # Inspect a loaded module and return the unique class that subclasses
    # DiffusionPipeline without being defined inside the diffusers package itself;
    # raises if several candidates exist. Referred to above as `find_pipeline_class`.
    from ..pipelines import DiffusionPipeline

    lowerCamelCase_ =dict(inspect.getmembers(__snake_case , inspect.isclass ) )
    lowerCamelCase_ =None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls , __snake_case )
            and cls.__module__.split('''.''' )[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    F'''Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'''
                    F''' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'''
                    F''' {loaded_module}.''' )
            lowerCamelCase_ =cls
    return pipeline_class


def a_ ( __snake_case : Union[str, os.PathLike] , __snake_case : str , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[Dict[str, str]] = None , __snake_case : Optional[Union[bool, str]] = None , __snake_case : Optional[str] = None , __snake_case : bool = False , ) -> List[Any]:
    """simple docstring"""
    # Resolve a module file from (a) a local path, (b) a bare community-pipeline
    # name fetched from GitHub at a revision matching the installed diffusers
    # version, or (c) a Hub repo via hf_hub_download; then copy it (and all its
    # relative imports, recursively) into the dynamic-modules cache and return
    # the cached path. Recursive call sites name this `get_cached_module_file`.
    # NOTE(review): nine parameters all named `__snake_case` — invalid as written;
    # body expects (pretrained_model_name_or_path, module_file, cache_dir,
    # force_download, resume_download, proxies, use_auth_token, revision,
    # local_files_only) — TODO confirm against the pre-obfuscation source.
    lowerCamelCase_ =str(__snake_case )
    lowerCamelCase_ =os.path.join(__snake_case , __snake_case )
    if os.path.isfile(__snake_case ):
        lowerCamelCase_ =module_file_or_url
        lowerCamelCase_ ='''local'''
    elif pretrained_model_name_or_path.count('''/''' ) == 0:
        lowerCamelCase_ =get_diffusers_versions()
        # cut ".dev0"
        lowerCamelCase_ ='''v''' + '''.'''.join(__version__.split('''.''' )[:3] )
        # retrieve github version that matches
        if revision is None:
            lowerCamelCase_ =latest_version if latest_version[1:] in available_versions else '''main'''
            logger.info(F'''Defaulting to latest_version: {revision}.''' )
        elif revision in available_versions:
            lowerCamelCase_ =F'''v{revision}'''
        elif revision == "main":
            lowerCamelCase_ =revision
        else:
            raise ValueError(
                F'''`custom_revision`: {revision} does not exist. Please make sure to choose one of'''
                F''' {', '.join(available_versions + ['main'] )}.''' )
        # community pipeline on GitHub
        lowerCamelCase_ =COMMUNITY_PIPELINES_URL.format(revision=__snake_case , pipeline=__snake_case )
        try:
            lowerCamelCase_ =cached_download(
                __snake_case , cache_dir=__snake_case , force_download=__snake_case , proxies=__snake_case , resume_download=__snake_case , local_files_only=__snake_case , use_auth_token=__snake_case , )
            lowerCamelCase_ ='''git'''
            lowerCamelCase_ =pretrained_model_name_or_path + '''.py'''
        except EnvironmentError:
            logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
            raise
    else:
        try:
            # Load from URL or cache if already cached
            lowerCamelCase_ =hf_hub_download(
                __snake_case , __snake_case , cache_dir=__snake_case , force_download=__snake_case , proxies=__snake_case , resume_download=__snake_case , local_files_only=__snake_case , use_auth_token=__snake_case , )
            lowerCamelCase_ =os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) )
        except EnvironmentError:
            logger.error(F'''Could not locate the {module_file} inside {pretrained_model_name_or_path}.''' )
            raise
    # Check we have all the requirements in our environment
    lowerCamelCase_ =check_imports(__snake_case )
    # Now we move the module inside our cached dynamic modules.
    lowerCamelCase_ =DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(__snake_case )
    lowerCamelCase_ =Path(__snake_case ) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(__snake_case , submodule_path / module_file )
        for module_needed in modules_needed:
            lowerCamelCase_ =F'''{module_needed}.py'''
            shutil.copy(os.path.join(__snake_case , __snake_case ) , submodule_path / module_needed )
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(__snake_case , __snake_case ):
            lowerCamelCase_ =use_auth_token
        elif use_auth_token is True:
            lowerCamelCase_ =HfFolder.get_token()
        else:
            lowerCamelCase_ =None
        lowerCamelCase_ =model_info(__snake_case , revision=__snake_case , token=__snake_case ).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        lowerCamelCase_ =submodule_path / commit_hash
        lowerCamelCase_ =full_submodule + os.path.sep + commit_hash
        create_dynamic_module(__snake_case )
        if not (submodule_path / module_file).exists():
            shutil.copy(__snake_case , submodule_path / module_file )
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    __snake_case , F'''{module_needed}.py''' , cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , )
    return os.path.join(__snake_case , __snake_case )


def a_ ( __snake_case : Union[str, os.PathLike] , __snake_case : str , __snake_case : Optional[str] = None , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[Dict[str, str]] = None , __snake_case : Optional[Union[bool, str]] = None , __snake_case : Optional[str] = None , __snake_case : bool = False , **__snake_case : Optional[Any] , ) -> int:
    """simple docstring"""
    # Public entry point: cache the module file, then load and return the class
    # from the cached module (module name = file name without ".py").
    # NOTE(review): duplicate `__snake_case` parameters — invalid as written.
    lowerCamelCase_ =get_cached_module_file(
        __snake_case , __snake_case , cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , )
    return get_class_in_module(__snake_case , final_module.replace('''.py''' , '''''' ) )
676
'''simple docstring''' from maths.prime_check import is_prime def a_ ( __snake_case : int ) -> int: """simple docstring""" if not isinstance(__snake_case , __snake_case ): lowerCamelCase_ =F'''Input value of [number={number}] must be an integer''' raise TypeError(__snake_case ) if is_prime(__snake_case ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
676
1
'''simple docstring'''
# NOTE(review): machine-obfuscated file. Local assignment targets were collapsed
# to `lowerCamelCase_` and parameters to `lowerCAmelCase`, so bodies read names
# (`gen_kwargs`, `output`, `metrics`, ...) that no visible assignment binds, and
# `super().__init__` passes the same `lowerCAmelCase` for every argument.
# Structurally this is a SeqaSeqTrainer subclass adding post-processing of
# generated predictions around evaluate/predict.
import math
import time
from typing import Dict, List, Optional

from torch.utils.data import Dataset

from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics

# XLA helpers are only importable when a TPU runtime is present.
if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class __UpperCamelCase ( lowerCamelCase__ ):
    # Trainer subclass: keeps raw eval examples and a post-processing hook so
    # metrics can be computed on decoded predictions instead of raw model output.
    def __init__( self, *lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
        """simple docstring"""
        # Stores `eval_examples` and `post_process_function` for later use —
        # the obfuscation replaced the `self.xxx =` targets with a local name.
        super().__init__(*lowerCAmelCase, **lowerCAmelCase )
        lowerCamelCase_ =eval_examples
        lowerCamelCase_ =post_process_function

    def lowercase__ ( self, lowerCAmelCase = None, lowerCAmelCase=None, lowerCAmelCase = None, lowerCAmelCase = "eval", **lowerCAmelCase, ):
        """simple docstring"""
        # Evaluation entry point: runs the evaluation loop with metric
        # computation disabled, then post-processes predictions and computes
        # metrics on the main process only.
        lowerCamelCase_ =gen_kwargs.copy()
        # Resolve generation kwargs, falling back to the training-args defaults.
        lowerCamelCase_ =(
            gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
        )
        lowerCamelCase_ =(
            gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
        )
        lowerCamelCase_ =gen_kwargs
        lowerCamelCase_ =self.eval_dataset if eval_dataset is None else eval_dataset
        lowerCamelCase_ =self.get_eval_dataloader(lowerCAmelCase )
        lowerCamelCase_ =self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        lowerCamelCase_ =self.compute_metrics
        lowerCamelCase_ =None
        lowerCamelCase_ =time.time()
        lowerCamelCase_ =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            lowerCamelCase_ =eval_loop(
                lowerCAmelCase, description='''Evaluation''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=lowerCAmelCase, metric_key_prefix=lowerCAmelCase, )
        finally:
            # Always restore the metric function, even if the loop raised.
            lowerCamelCase_ =compute_metrics
        lowerCamelCase_ =self.args.eval_batch_size * self.args.world_size
        # Exclude JIT compilation time from the measured throughput.
        if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                lowerCAmelCase, lowerCAmelCase, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            lowerCamelCase_ =self.post_process_function(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
            lowerCamelCase_ =self.compute_metrics(lowerCAmelCase )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f'''{metric_key_prefix}_''' ):
                    lowerCamelCase_ =metrics.pop(lowerCAmelCase )
            metrics.update(output.metrics )
        else:
            lowerCamelCase_ =output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(lowerCAmelCase )
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        lowerCamelCase_ =self.callback_handler.on_evaluate(self.args, self.state, self.control, lowerCAmelCase )
        return metrics

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase = "test", **lowerCAmelCase ):
        """simple docstring"""
        # Prediction entry point: same flow as evaluation, but returns the raw
        # loop output when no post-processing/metric hook is configured, and
        # otherwise a PredictionOutput built from the post-processed predictions.
        lowerCamelCase_ =gen_kwargs.copy()
        lowerCamelCase_ =self.get_test_dataloader(lowerCAmelCase )
        # Temporarily disable metric computation, we will do it in the loop here.
        lowerCamelCase_ =self.compute_metrics
        lowerCamelCase_ =None
        lowerCamelCase_ =time.time()
        lowerCamelCase_ =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            lowerCamelCase_ =eval_loop(
                lowerCAmelCase, description='''Prediction''', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=lowerCAmelCase, metric_key_prefix=lowerCAmelCase, )
        finally:
            lowerCamelCase_ =compute_metrics
        lowerCamelCase_ =self.args.eval_batch_size * self.args.world_size
        if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                lowerCAmelCase, lowerCAmelCase, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        lowerCamelCase_ =self.post_process_function(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, '''predict''' )
        lowerCamelCase_ =self.compute_metrics(lowerCAmelCase )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f'''{metric_key_prefix}_''' ):
                lowerCamelCase_ =metrics.pop(lowerCAmelCase )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=lowerCAmelCase )
676
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
# NOTE(review): machine-obfuscated variance-exploding (VE) score-SDE scheduler
# (the `SdeVeOutput` return type and the disclaimer above ground this).
# All assignment targets were collapsed to `lowerCamelCase_` and parameters to
# `lowerCAmelCase`, so bodies read names (`sample`, `sigma_min`, `timesteps`, ...)
# that no visible binding provides; it is not runnable as written.
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


@dataclass
class __UpperCamelCase ( lowerCamelCase__ ):
    # Output container holding the stepped sample and its pre-noise mean
    # (referenced below as `SdeVeOutput`).
    lowercase : torch.FloatTensor
    lowercase : torch.FloatTensor


class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    # The VE-SDE scheduler itself (config mixin + scheduler mixin bases).
    lowercase : Tuple =1

    @register_to_config
    def __init__( self, lowerCAmelCase = 2_000, lowerCAmelCase = 0.1_5, lowerCAmelCase = 0.0_1, lowerCAmelCase = 1_3_4_8.0, lowerCAmelCase = 1e-5, lowerCAmelCase = 1, ):
        """simple docstring"""
        # Defaults presumably: num_train_timesteps=2000, snr=0.15,
        # sigma_min=0.01, sigma_max=1348.0, sampling_eps=1e-5, correct_steps=1
        # — TODO confirm; the obfuscation erased the parameter names.
        lowerCamelCase_ =sigma_max
        # setable values
        lowerCamelCase_ =None
        self.set_sigmas(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """simple docstring"""
        # Identity input scaling: VE-SDE does not rescale model inputs.
        return sample

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None ):
        """simple docstring"""
        # Builds `self.timesteps` as a descending linspace from 1 to sampling_eps.
        lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps
        lowerCamelCase_ =torch.linspace(1, lowerCAmelCase, lowerCAmelCase, device=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None ):
        """simple docstring"""
        # Builds the geometric sigma schedule between sigma_min and sigma_max.
        lowerCamelCase_ =sigma_min if sigma_min is not None else self.config.sigma_min
        lowerCamelCase_ =sigma_max if sigma_max is not None else self.config.sigma_max
        lowerCamelCase_ =sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(lowerCAmelCase, lowerCAmelCase )
        lowerCamelCase_ =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        lowerCamelCase_ =torch.exp(torch.linspace(math.log(lowerCAmelCase ), math.log(lowerCAmelCase ), lowerCAmelCase ) )
        lowerCamelCase_ =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        # Sigma of the previous discrete timestep; zero at timestep 0.
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device ) ), self.discrete_sigmas[timesteps - 1].to(timesteps.device ), )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ):
        """simple docstring"""
        # Predictor step (reverse SDE): drift from the score model plus a
        # diffusion noise term; returns (prev_sample, prev_sample_mean).
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        lowerCamelCase_ =timestep * torch.ones(
            sample.shape[0], device=sample.device )  # torch.repeat_interleave(timestep, sample.shape[0])
        lowerCamelCase_ =(timestep * (len(self.timesteps ) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        lowerCamelCase_ =timesteps.to(self.discrete_sigmas.device )
        lowerCamelCase_ =self.discrete_sigmas[timesteps].to(sample.device )
        lowerCamelCase_ =self.get_adjacent_sigma(lowerCAmelCase, lowerCAmelCase ).to(sample.device )
        lowerCamelCase_ =torch.zeros_like(lowerCAmelCase )
        lowerCamelCase_ =(sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        lowerCamelCase_ =diffusion.flatten()
        # Broadcast the per-batch diffusion coefficient over the sample dims.
        while len(diffusion.shape ) < len(sample.shape ):
            lowerCamelCase_ =diffusion.unsqueeze(-1 )
        lowerCamelCase_ =drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        lowerCamelCase_ =randn_tensor(
            sample.shape, layout=sample.layout, generator=lowerCAmelCase, device=sample.device, dtype=sample.dtype )
        lowerCamelCase_ =sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        lowerCamelCase_ =prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=lowerCAmelCase, prev_sample_mean=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = True, ):
        """simple docstring"""
        # Corrector step (Langevin dynamics): step size derived from the
        # signal-to-noise ratio of the score vs. fresh Gaussian noise.
        if self.timesteps is None:
            raise ValueError(
                '''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        lowerCamelCase_ =randn_tensor(sample.shape, layout=sample.layout, generator=lowerCAmelCase ).to(sample.device )
        # compute step size from the model_output, the noise, and the snr
        lowerCamelCase_ =torch.norm(model_output.reshape(model_output.shape[0], -1 ), dim=-1 ).mean()
        lowerCamelCase_ =torch.norm(noise.reshape(noise.shape[0], -1 ), dim=-1 ).mean()
        lowerCamelCase_ =(self.config.snr * noise_norm / grad_norm) ** 2 * 2
        lowerCamelCase_ =step_size * torch.ones(sample.shape[0] ).to(sample.device )
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        lowerCamelCase_ =step_size.flatten()
        while len(step_size.shape ) < len(sample.shape ):
            lowerCamelCase_ =step_size.unsqueeze(-1 )
        lowerCamelCase_ =sample + step_size * model_output
        lowerCamelCase_ =prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """simple docstring"""
        # Forward process: add sigma-scaled noise to clean samples
        # (sigmas indexed per-timestep, broadcast over the trailing dims).
        lowerCamelCase_ =timesteps.to(original_samples.device )
        lowerCamelCase_ =self.discrete_sigmas.to(original_samples.device )[timesteps]
        lowerCamelCase_ =(
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(lowerCAmelCase ) * sigmas[:, None, None, None]
        )
        lowerCamelCase_ =noise + original_samples
        return noisy_samples

    def __len__( self ):
        """simple docstring"""
        return self.config.num_train_timesteps
676
1
'''simple docstring'''
# NOTE(review): machine-obfuscated FiLM-conditioned T5-style decoder (the layer
# class names TaLayerSelfAttentionCond / TaLayerCrossAttention / TaFiLMLayer and
# the `spec_out` head ground this). Assignment targets were collapsed to
# `lowerCamelCase_` and parameters to `lowerCAmelCase`, so module attributes the
# forward passes read (`self.decoders`, `self.attention`, ...) are never visibly
# assigned; the file is not runnable as written.
import math

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin


class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
    # Top-level decoder: embeds a continuous noise time, projects continuous
    # inputs, runs a stack of FiLM-conditioned decoder layers, and projects to
    # the spectrogram output (`spec_out`).
    @register_to_config
    def __init__( self, lowerCAmelCase = 128, lowerCAmelCase = 256, lowerCAmelCase = 2_0_0_0.0, lowerCAmelCase = 768, lowerCAmelCase = 12, lowerCAmelCase = 12, lowerCAmelCase = 64, lowerCAmelCase = 2_048, lowerCAmelCase = 0.1, ):
        """simple docstring"""
        super().__init__()
        # Conditioning MLP: Linear -> SiLU -> Linear -> SiLU producing a
        # 4*d_model-wide conditioning embedding.
        lowerCamelCase_ =nn.Sequential(
            nn.Linear(lowerCAmelCase, d_model * 4, bias=lowerCAmelCase ), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=lowerCAmelCase ), nn.SiLU(), )
        # Learned position embedding table.
        lowerCamelCase_ =nn.Embedding(lowerCAmelCase, lowerCAmelCase )
        lowerCamelCase_ =False
        lowerCamelCase_ =nn.Linear(lowerCAmelCase, lowerCAmelCase, bias=lowerCAmelCase )
        lowerCamelCase_ =nn.Dropout(p=lowerCAmelCase )
        lowerCamelCase_ =nn.ModuleList()
        for lyr_num in range(lowerCAmelCase ):
            # FiLM conditional T5 decoder
            lowerCamelCase_ =DecoderLayer(d_model=lowerCAmelCase, d_kv=lowerCAmelCase, num_heads=lowerCAmelCase, d_ff=lowerCAmelCase, dropout_rate=lowerCAmelCase )
            self.decoders.append(lowerCAmelCase )
        lowerCamelCase_ =TaLayerNorm(lowerCAmelCase )
        lowerCamelCase_ =nn.Dropout(p=lowerCAmelCase )
        lowerCamelCase_ =nn.Linear(lowerCAmelCase, lowerCAmelCase, bias=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        # Outer product of the query/key padding masks -> encoder-decoder mask,
        # with an extra broadcast dim inserted at -3 (presumably the head dim).
        lowerCamelCase_ =torch.mul(query_input.unsqueeze(-1 ), key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        lowerCamelCase_ =get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype )
        lowerCamelCase_ =self.conditioning_emb(lowerCAmelCase ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        lowerCamelCase_ =decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        lowerCamelCase_ =torch.broadcast_to(
            torch.arange(lowerCAmelCase, device=decoder_input_tokens.device ), (batch, seq_length), )
        lowerCamelCase_ =self.position_encoding(lowerCAmelCase )
        lowerCamelCase_ =self.continuous_inputs_projection(lowerCAmelCase )
        inputs += position_encodings
        lowerCamelCase_ =self.dropout(lowerCAmelCase )
        # decoder: No padding present.
        lowerCamelCase_ =torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        lowerCamelCase_ =[(x, self.encoder_decoder_mask(lowerCAmelCase, lowerCAmelCase )) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        lowerCamelCase_ =torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1 )
        lowerCamelCase_ =torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1 )
        for lyr in self.decoders:
            lowerCamelCase_ =lyr(
                lowerCAmelCase, conditioning_emb=lowerCAmelCase, encoder_hidden_states=lowerCAmelCase, encoder_attention_mask=lowerCAmelCase, )[0]
        lowerCamelCase_ =self.decoder_norm(lowerCAmelCase )
        lowerCamelCase_ =self.post_dropout(lowerCAmelCase )
        lowerCamelCase_ =self.spec_out(lowerCAmelCase )
        return spec_out


class __UpperCamelCase ( nn.Module ):
    # One decoder layer = conditioned self-attention (layer 0), cross-attention
    # (layer 1), and a FiLM-conditioned feed-forward block (last layer).
    def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase=1e-6 ):
        """simple docstring"""
        super().__init__()
        lowerCamelCase_ =nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=lowerCAmelCase, d_kv=lowerCAmelCase, num_heads=lowerCAmelCase, dropout_rate=lowerCAmelCase ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=lowerCAmelCase, d_kv=lowerCAmelCase, num_heads=lowerCAmelCase, dropout_rate=lowerCAmelCase, layer_norm_epsilon=lowerCAmelCase, ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=lowerCAmelCase, d_ff=lowerCAmelCase, dropout_rate=lowerCAmelCase, layer_norm_epsilon=lowerCAmelCase ) )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, ):
        """simple docstring"""
        lowerCamelCase_ =self.layer[0](
            lowerCAmelCase, conditioning_emb=lowerCAmelCase, attention_mask=lowerCAmelCase, )
        if encoder_hidden_states is not None:
            # Convert the 0/1 encoder mask into additive attention bias
            # (0 where attended, -1e10 where masked).
            lowerCamelCase_ =torch.where(encoder_attention_mask > 0, 0, -1e10 ).to(
                encoder_hidden_states.dtype )
            lowerCamelCase_ =self.layer[1](
                lowerCAmelCase, key_value_states=lowerCAmelCase, attention_mask=lowerCAmelCase, )
        # Apply Film Conditional Feed Forward layer
        lowerCamelCase_ =self.layer[-1](lowerCAmelCase, lowerCAmelCase )
        return (hidden_states,)


class __UpperCamelCase ( nn.Module ):
    # Pre-norm self-attention with optional FiLM conditioning of the normed
    # hidden states, plus a residual dropout connection.
    def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        super().__init__()
        lowerCamelCase_ =TaLayerNorm(lowerCAmelCase )
        lowerCamelCase_ =TaFiLMLayer(in_features=d_model * 4, out_features=lowerCAmelCase )
        lowerCamelCase_ =Attention(query_dim=lowerCAmelCase, heads=lowerCAmelCase, dim_head=lowerCAmelCase, out_bias=lowerCAmelCase, scale_qk=lowerCAmelCase )
        lowerCamelCase_ =nn.Dropout(lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=None, ):
        """simple docstring"""
        lowerCamelCase_ =self.layer_norm(lowerCAmelCase )
        if conditioning_emb is not None:
            lowerCamelCase_ =self.FiLMLayer(lowerCAmelCase, lowerCAmelCase )
        # Self-attention block
        lowerCamelCase_ =self.attention(lowerCAmelCase )
        lowerCamelCase_ =hidden_states + self.dropout(lowerCAmelCase )
        return hidden_states


class __UpperCamelCase ( nn.Module ):
    # Pre-norm cross-attention with residual dropout; squeezes the broadcast
    # head dim off the attention mask before attending.
    def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        super().__init__()
        lowerCamelCase_ =Attention(query_dim=lowerCAmelCase, heads=lowerCAmelCase, dim_head=lowerCAmelCase, out_bias=lowerCAmelCase, scale_qk=lowerCAmelCase )
        lowerCamelCase_ =TaLayerNorm(lowerCAmelCase, eps=lowerCAmelCase )
        lowerCamelCase_ =nn.Dropout(lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None, lowerCAmelCase=None, ):
        """simple docstring"""
        lowerCamelCase_ =self.layer_norm(lowerCAmelCase )
        lowerCamelCase_ =self.attention(
            lowerCAmelCase, encoder_hidden_states=lowerCAmelCase, attention_mask=attention_mask.squeeze(1 ), )
        lowerCamelCase_ =hidden_states + self.dropout(lowerCAmelCase )
        return layer_output


class __UpperCamelCase ( nn.Module ):
    # FiLM-conditioned feed-forward block (pre-norm, residual dropout).
    def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        super().__init__()
        lowerCamelCase_ =TaDenseGatedActDense(d_model=lowerCAmelCase, d_ff=lowerCAmelCase, dropout_rate=lowerCAmelCase )
        lowerCamelCase_ =TaFiLMLayer(in_features=d_model * 4, out_features=lowerCAmelCase )
        lowerCamelCase_ =TaLayerNorm(lowerCAmelCase, eps=lowerCAmelCase )
        lowerCamelCase_ =nn.Dropout(lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
        """simple docstring"""
        lowerCamelCase_ =self.layer_norm(lowerCAmelCase )
        if conditioning_emb is not None:
            lowerCamelCase_ =self.film(lowerCAmelCase, lowerCAmelCase )
        lowerCamelCase_ =self.DenseReluDense(lowerCAmelCase )
        lowerCamelCase_ =hidden_states + self.dropout(lowerCAmelCase )
        return hidden_states


class __UpperCamelCase ( nn.Module ):
    # T5-style gated-GELU dense block: hidden = GELU(wi_0(x)) * wi_1(x) -> wo.
    def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        super().__init__()
        lowerCamelCase_ =nn.Linear(lowerCAmelCase, lowerCAmelCase, bias=lowerCAmelCase )
        lowerCamelCase_ =nn.Linear(lowerCAmelCase, lowerCAmelCase, bias=lowerCAmelCase )
        lowerCamelCase_ =nn.Linear(lowerCAmelCase, lowerCAmelCase, bias=lowerCAmelCase )
        lowerCamelCase_ =nn.Dropout(lowerCAmelCase )
        lowerCamelCase_ =NewGELUActivation()

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # NOTE(review): `self.wi_a` is used for BOTH the gated and the linear
        # branch here — in a T5 gated dense block these are two distinct
        # projections (wi_0 / wi_1); looks like the obfuscation collapsed the
        # second name. TODO confirm against the pre-obfuscation source.
        lowerCamelCase_ =self.act(self.wi_a(lowerCAmelCase ) )
        lowerCamelCase_ =self.wi_a(lowerCAmelCase )
        lowerCamelCase_ =hidden_gelu * hidden_linear
        lowerCamelCase_ =self.dropout(lowerCAmelCase )
        lowerCamelCase_ =self.wo(lowerCAmelCase )
        return hidden_states


class __UpperCamelCase ( nn.Module ):
    # T5-style RMSNorm: scale by 1/sqrt(mean(x^2)) with a learned weight, no bias.
    def __init__( self, lowerCAmelCase, lowerCAmelCase=1e-6 ):
        """simple docstring"""
        super().__init__()
        lowerCamelCase_ =nn.Parameter(torch.ones(lowerCAmelCase ) )
        lowerCamelCase_ =eps

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # Variance computed in float32 for stability (floataa is the obfuscated
        # spelling of a float dtype — presumably float32; TODO confirm).
        lowerCamelCase_ =hidden_states.to(torch.floataa ).pow(2 ).mean(-1, keepdim=lowerCAmelCase )
        lowerCamelCase_ =hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.floataa, torch.bfloataa]:
            lowerCamelCase_ =hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states


class __UpperCamelCase ( nn.Module ):
    # tanh-approximation GELU (the "new GELU" formula).
    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(lowerCAmelCase, 3.0 )) ))


class __UpperCamelCase ( nn.Module ):
    # FiLM layer: a linear projection of the conditioning embedding yields
    # (scale, shift); output is x * (1 + scale) + shift.
    def __init__( self, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        super().__init__()
        lowerCamelCase_ =nn.Linear(lowerCAmelCase, out_features * 2, bias=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase ):
        """simple docstring"""
        lowerCamelCase_ =self.scale_bias(lowerCAmelCase )
        lowerCamelCase_, lowerCamelCase_ =torch.chunk(lowerCAmelCase, 2, -1 )
        lowerCamelCase_ =x * (1 + scale) + shift
        return x
676
'''simple docstring''' def a_ ( __snake_case : int , __snake_case : int ) -> str: """simple docstring""" if not isinstance(__snake_case , __snake_case ): raise ValueError('''iterations must be defined as integers''' ) if not isinstance(__snake_case , __snake_case ) or not number >= 1: raise ValueError( '''starting number must be and integer and be more than 0''' ) if not iterations >= 1: raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' ) lowerCamelCase_ ='''''' while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(__snake_case ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
676
1
'''simple docstring''' def a_ ( __snake_case : list ) -> int: """simple docstring""" if not grid or not grid[0]: raise TypeError('''The grid does not contain the appropriate information''' ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] lowerCamelCase_ =grid[0] for row_n in range(1 , len(__snake_case ) ): lowerCamelCase_ =grid[row_n] lowerCamelCase_ =fill_row(__snake_case , __snake_case ) lowerCamelCase_ =grid[row_n] return grid[-1][-1] def a_ ( __snake_case : list , __snake_case : list ) -> list: """simple docstring""" current_row[0] += row_above[0] for cell_n in range(1 , len(__snake_case ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
676
'''simple docstring''' from typing import List import numpy as np def a_ ( __snake_case : dict ) -> int: """simple docstring""" lowerCamelCase_ ={key: len(__snake_case ) for key, value in gen_kwargs.items() if isinstance(__snake_case , __snake_case )} if len(set(lists_lengths.values() ) ) > 1: raise RuntimeError( ( '''Sharding is ambiguous for this dataset: ''' + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n''' + '''\n'''.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() ) + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, ''' + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.''' ) ) lowerCamelCase_ =max(lists_lengths.values() , default=0 ) return max(1 , __snake_case ) def a_ ( __snake_case : int , __snake_case : int ) -> List[range]: """simple docstring""" lowerCamelCase_ =[] for group_idx in range(__snake_case ): lowerCamelCase_ =num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs)) if num_shards_to_add == 0: break lowerCamelCase_ =shards_indices_per_group[-1].stop if shards_indices_per_group else 0 lowerCamelCase_ =range(__snake_case , start + num_shards_to_add ) shards_indices_per_group.append(__snake_case ) return shards_indices_per_group def a_ ( __snake_case : dict , __snake_case : int ) -> List[dict]: """simple docstring""" lowerCamelCase_ =_number_of_shards_in_gen_kwargs(__snake_case ) if num_shards == 1: return [dict(__snake_case )] else: lowerCamelCase_ =_distribute_shards(num_shards=__snake_case , max_num_jobs=__snake_case ) return [ { key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]] if isinstance(__snake_case , __snake_case ) else value for key, value in gen_kwargs.items() } for group_idx in range(len(__snake_case ) ) ] def a_ ( __snake_case : List[dict] ) -> dict: """simple docstring""" 
return { key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]] if isinstance(gen_kwargs_list[0][key] , __snake_case ) else gen_kwargs_list[0][key] for key in gen_kwargs_list[0] } def a_ ( __snake_case : np.random.Generator , __snake_case : dict ) -> dict: """simple docstring""" lowerCamelCase_ ={len(__snake_case ) for value in gen_kwargs.values() if isinstance(__snake_case , __snake_case )} lowerCamelCase_ ={} for size in list_sizes: lowerCamelCase_ =list(range(__snake_case ) ) rng.shuffle(indices_per_size[size] ) # Now let's copy the gen_kwargs and shuffle the lists based on their sizes lowerCamelCase_ =dict(__snake_case ) for key, value in shuffled_kwargs.items(): if isinstance(__snake_case , __snake_case ): lowerCamelCase_ =[value[i] for i in indices_per_size[len(__snake_case )]] return shuffled_kwargs
676
1
"""Dummy placeholder objects that raise a clear error when ``torch`` is unavailable."""
from ..utils import DummyObject, requires_backends


# NOTE(review): the original chunk repeated the very same class definition (always named
# ``__UpperCamelCase``) and the very same module-level function (always named ``a_``)
# dozens of times. Each redefinition simply rebinds the same module-level name, so only
# the last class and last function were ever reachable — every earlier copy was dead
# code. Worse, the obfuscated signatures used the same parameter name twice
# (``*lowerCAmelCase, **lowerCAmelCase``), which is a SyntaxError, and the metaclass
# name ``lowerCamelCase__`` was undefined. The duplicates are collapsed into one
# working definition of each, with the metaclass restored to the imported DummyObject.
class __UpperCamelCase(metaclass=DummyObject):
    """Placeholder class whose every use reports that the ``torch`` backend is missing."""

    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    # presumably these classmethods were ``from_config`` / ``from_pretrained`` as in the
    # usual dummy-object template — TODO confirm against the un-obfuscated original.
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


def a_(*args, **kwargs):
    """Placeholder function that reports the missing ``torch`` backend when called."""
    requires_backends(a_, ["torch"])
676
"""Binarize a text dump: tokenize each line once and pickle the id sequences."""
import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def a_() -> None:
    """Tokenize a text dump and pickle the resulting token-id sequences.

    Reads one example per line from ``--file_path``, encodes each line as
    ``<bos> text <sep>`` with the selected tokenizer, and dumps the shuffled list
    of id arrays to ``<dump_file>.<tokenizer_name>.pickle``.
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    count = 0
    interval = 10_000
    start = time.time()
    for text in data:
        # Wrap each example with the tokenizer's boundary tokens ourselves, so we
        # can disable add_special_tokens in encode().
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        count += 1
        if count % interval == 0:
            end = time.time()
            logger.info(f"{count} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # uint16 halves the on-disk size whenever the vocabulary fits in 16 bits.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    # BUGFIX: the original guard called an undefined ``main()``; the entry point is ``a_``.
    a_()
676
1
'''simple docstring''' from __future__ import annotations a_ : int = list[list[int]] # assigning initial values to the grid a_ : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution a_ : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def a_ ( __snake_case : Matrix , __snake_case : int , __snake_case : int , __snake_case : int ) -> bool: """simple docstring""" for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def a_ ( __snake_case : Matrix ) -> tuple[int, int] | None: """simple docstring""" for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def a_ ( __snake_case : Matrix ) -> Matrix | None: """simple docstring""" if location := find_empty_location(__snake_case ): lowerCamelCase_, lowerCamelCase_ =location else: # If the location is ``None``, then the grid is solved. 
return grid for digit in range(1 , 10 ): if is_safe(__snake_case , __snake_case , __snake_case , __snake_case ): lowerCamelCase_ =digit if sudoku(__snake_case ) is not None: return grid lowerCamelCase_ =0 return None def a_ ( __snake_case : Matrix ) -> None: """simple docstring""" for row in grid: for cell in row: print(__snake_case , end=''' ''' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print("""\nExample grid:\n""" + """=""" * 20) print_solution(example_grid) print("""\nExample grid solution:""") a_ : Union[str, Any] = sudoku(example_grid) if solution is not None: print_solution(solution) else: print("""Cannot find a solution.""")
676
"""MVP model configuration."""
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


# NOTE(review): the obfuscated original subclassed an undefined name and used the
# same parameter name for every __init__ argument (a SyntaxError); the base class is
# restored to the imported ``PretrainedConfig`` and the parameter/attribute names are
# reconstructed from the defaults and the ``attribute_map`` — TODO confirm against the
# un-obfuscated original.
class __UpperCamelCase(PretrainedConfig):
    """Configuration class for the MVP encoder-decoder model (``model_type="mvp"``).

    Stores vocabulary size, encoder/decoder depths and widths, dropout rates,
    generation-related token ids, and the optional prompt-tuning settings.
    """

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_267,
        max_position_embeddings=1_024,
        encoder_layers=12,
        encoder_ffn_dim=4_096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4_096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1_024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # Backward compatibility with the legacy ``force_bos_token_to_be_generated`` flag.
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
676
1
'''simple docstring'''
# Lazy-import shim for the ViT-MSN model package.
# NOTE(review): identifiers look machine-obfuscated — the import-structure
# dict, the conditional list, and the lazy module are all assigned to `a_`,
# and `_import_structure` used at the bottom is never defined here; confirm
# against the upstream transformers __init__ before running.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Config symbols are importable unconditionally.
a_ : Any = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}

# Modeling symbols are registered only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : List[str] = [
        """VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ViTMSNModel""",
        """ViTMSNForImageClassification""",
        """ViTMSNPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    # At runtime the module object is replaced by a lazy proxy.
    a_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
676
'''simple docstring'''
# SentencePiece tokenizer for BERT-for-seq-generation checkpoints.
# NOTE(review): identifiers look machine-obfuscated — several methods reuse
# the name `lowercase__` (later defs shadow earlier ones), parameter lists
# repeat `lowerCAmelCase` (not valid Python), and bodies reference names
# (sp_model_kwargs, vocab_file, tokens, d, ...) that reveal the original
# parameter names. Confirm against the upstream source before running.
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


a_ : int = logging.get_logger(__name__)

# Expected vocabulary filename inside a checkpoint directory.
a_ : str = {"""vocab_file""": """spiece.model"""}

# Pretrained model id -> remote vocab URL.
a_ : Optional[int] = {
    """vocab_file""": {
        """bert_for_seq_generation""": (
            """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
        ),
    }
}

# Max positional-embedding size per pretrained model.
a_ : List[Any] = {"""bert_for_seq_generation""": 5_12}


class __UpperCamelCase ( lowerCamelCase__ ):
    lowercase : Optional[int] =VOCAB_FILES_NAMES
    lowercase : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
    lowercase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase : List[int] =[]
    lowercase : str =['input_ids', 'attention_mask']

    def __init__( self, lowerCAmelCase, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<::::>", lowerCAmelCase = None, **lowerCAmelCase, ):
        """simple docstring"""
        # sp_model_kwargs defaults to an empty dict when not supplied.
        lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, pad_token=lowerCAmelCase, sep_token=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, )
        lowerCamelCase_ =vocab_file
        # Load the SentencePiece model from disk.
        lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowerCAmelCase )

    @property
    def lowercase__ ( self ):
        """simple docstring"""
        # Vocabulary size as reported by the SentencePiece model.
        return self.sp_model.get_piece_size()

    def lowercase__ ( self ):
        """simple docstring"""
        # Build the token -> id map, including user-added tokens.
        lowerCamelCase_ ={self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        """simple docstring"""
        # Drop the unpicklable SentencePiece handle before pickling.
        lowerCamelCase_ =self.__dict__.copy()
        lowerCamelCase_ =None
        return state

    def __setstate__( self, lowerCAmelCase ):
        """simple docstring"""
        lowerCamelCase_ =d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs''' ):
            lowerCamelCase_ ={}
        # Recreate the SentencePiece handle after unpickling.
        lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # Tokenize text into SentencePiece pieces.
        return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # token (piece) -> integer id.
        return self.sp_model.piece_to_id(lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # integer id -> token (piece).
        lowerCamelCase_ =self.sp_model.IdToPiece(lowerCAmelCase )
        return token

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # Join pieces back into a string, emitting special tokens verbatim.
        lowerCamelCase_ =[]
        lowerCamelCase_ =''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(lowerCAmelCase ) + token
                lowerCamelCase_ =[]
            else:
                current_sub_tokens.append(lowerCAmelCase )
        out_string += self.sp_model.decode(lowerCAmelCase )
        return out_string.strip()

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """simple docstring"""
        # Save (or copy) the SentencePiece model into `save_directory`.
        if not os.path.isdir(lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowerCamelCase_ =os.path.join(
            lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, lowerCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            # No file on disk to copy — serialize the in-memory model instead.
            with open(lowerCAmelCase, '''wb''' ) as fi:
                lowerCamelCase_ =self.sp_model.serialized_model_proto()
                fi.write(lowerCAmelCase )
        return (out_vocab_file,)
676
1
'''F1 metric for the `datasets` library — a thin wrapper around sklearn.metrics.f1_score.'''
# FIX(review): the obfuscated original imported `fa_score`, which does not
# exist in sklearn.metrics (scikit-learn provides `f1_score`); the three
# module docstring constants were all assigned to `a_` although the class
# decorator references `_DESCRIPTION` / `_KWARGS_DESCRIPTION` and the
# MetricInfo references `_CITATION`; and the two methods both had the
# obfuscated name `lowercase__` with duplicate `lowerCAmelCase` parameters
# (a SyntaxError). Names below are restored from those in-file references
# and from the `datasets.Metric` API (`_info` / `_compute` hooks).
from sklearn.metrics import f1_score

import datasets


_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric(\"f1\")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric(\"f1\")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric(\"f1\")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
"""

_CITATION = """
@article{scikit-learn,
    title={Scikit-learn: Machine Learning in {P}ython},
    author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
    journal={Journal of Machine Learning Research},
    volume={12},
    pages={2825--2830},
    year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __UpperCamelCase(datasets.Metric):
    def _info(self):
        """Declare the metric's input schema and reference documentation."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # Multilabel configs take a sequence of labels per example;
                # all other configs take a single int label per example.
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''int32''')),
                    '''references''': datasets.Sequence(datasets.Value('''int32''')),
                }
                if self.config_name == '''multilabel'''
                else {
                    '''predictions''': datasets.Value('''int32'''),
                    '''references''': datasets.Value('''int32'''),
                }
            ),
            reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """Compute the F1 score; see the module docstring for argument semantics.

        Returns a dict {"f1": float} for scalar averages, or
        {"f1": ndarray} when `average=None` yields per-class scores.
        """
        # sklearn's signature is f1_score(y_true, y_pred, ...): references
        # (ground truth) first, predictions second.
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
676
'''Evaluate a polynomial given as a sequence of coefficients (constant term first).'''
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate sum(c_i * x**i) naively, term by term, in O(n) multiplications per term.

    FIX(review): the obfuscated original defined BOTH functions under the
    name `a_` (the second shadowed the first) while the __main__ block called
    `evaluate_poly` and `horner`, which raised NameError at runtime; the
    names are restored from those call sites.
    """
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's scheme (one multiply/add per coefficient)."""
    result = 0.0
    # Fold from the highest-order coefficient down: r = r*x + c.
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
676
1
'''simple docstring'''
# SentencePiece tokenizer for BERT-for-seq-generation checkpoints.
# NOTE(review): this chunk is a byte-for-byte duplicate of an earlier
# tokenizer chunk in this concatenated file; identifiers look
# machine-obfuscated — several methods reuse the name `lowercase__`
# (later defs shadow earlier ones) and parameter lists repeat
# `lowerCAmelCase` (not valid Python). Confirm against upstream.
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


a_ : int = logging.get_logger(__name__)

# Expected vocabulary filename inside a checkpoint directory.
a_ : str = {"""vocab_file""": """spiece.model"""}

# Pretrained model id -> remote vocab URL.
a_ : Optional[int] = {
    """vocab_file""": {
        """bert_for_seq_generation""": (
            """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"""
        ),
    }
}

# Max positional-embedding size per pretrained model.
a_ : List[Any] = {"""bert_for_seq_generation""": 5_12}


class __UpperCamelCase ( lowerCamelCase__ ):
    lowercase : Optional[int] =VOCAB_FILES_NAMES
    lowercase : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
    lowercase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowercase : List[int] =[]
    lowercase : str =['input_ids', 'attention_mask']

    def __init__( self, lowerCAmelCase, lowerCAmelCase="<s>", lowerCAmelCase="</s>", lowerCAmelCase="<unk>", lowerCAmelCase="<pad>", lowerCAmelCase="<::::>", lowerCAmelCase = None, **lowerCAmelCase, ):
        """simple docstring"""
        # sp_model_kwargs defaults to an empty dict when not supplied.
        lowerCamelCase_ ={} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=lowerCAmelCase, eos_token=lowerCAmelCase, unk_token=lowerCAmelCase, pad_token=lowerCAmelCase, sep_token=lowerCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **lowerCAmelCase, )
        lowerCamelCase_ =vocab_file
        # Load the SentencePiece model from disk.
        lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowerCAmelCase )

    @property
    def lowercase__ ( self ):
        """simple docstring"""
        # Vocabulary size as reported by the SentencePiece model.
        return self.sp_model.get_piece_size()

    def lowercase__ ( self ):
        """simple docstring"""
        # Build the token -> id map, including user-added tokens.
        lowerCamelCase_ ={self.convert_ids_to_tokens(lowerCAmelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        """simple docstring"""
        # Drop the unpicklable SentencePiece handle before pickling.
        lowerCamelCase_ =self.__dict__.copy()
        lowerCamelCase_ =None
        return state

    def __setstate__( self, lowerCAmelCase ):
        """simple docstring"""
        lowerCamelCase_ =d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs''' ):
            lowerCamelCase_ ={}
        # Recreate the SentencePiece handle after unpickling.
        lowerCamelCase_ =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # Tokenize text into SentencePiece pieces.
        return self.sp_model.encode(lowerCAmelCase, out_type=lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # token (piece) -> integer id.
        return self.sp_model.piece_to_id(lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # integer id -> token (piece).
        lowerCamelCase_ =self.sp_model.IdToPiece(lowerCAmelCase )
        return token

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # Join pieces back into a string, emitting special tokens verbatim.
        lowerCamelCase_ =[]
        lowerCamelCase_ =''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(lowerCAmelCase ) + token
                lowerCamelCase_ =[]
            else:
                current_sub_tokens.append(lowerCAmelCase )
        out_string += self.sp_model.decode(lowerCAmelCase )
        return out_string.strip()

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = None ):
        """simple docstring"""
        # Save (or copy) the SentencePiece model into `save_directory`.
        if not os.path.isdir(lowerCAmelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowerCamelCase_ =os.path.join(
            lowerCAmelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file, lowerCAmelCase )
        elif not os.path.isfile(self.vocab_file ):
            # No file on disk to copy — serialize the in-memory model instead.
            with open(lowerCAmelCase, '''wb''' ) as fi:
                lowerCamelCase_ =self.sp_model.serialized_model_proto()
                fi.write(lowerCAmelCase )
        return (out_vocab_file,)
676
'''simple docstring'''
# Processor pairing a CLIP image processor with an XLM-Roberta tokenizer.
# NOTE(review): identifiers look machine-obfuscated — parameters repeat
# `lowerCAmelCase` (not valid Python) and distinct locals were collapsed
# onto `lowerCamelCase_`; bodies reference the intended names
# (image_processor, tokenizer, text, images, encoding, ...).
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class __UpperCamelCase ( lowerCamelCase__ ):
    # Attributes managed by ProcessorMixin and the expected component classes.
    lowercase : Optional[int] =['image_processor', 'tokenizer']
    lowercase : str ='CLIPImageProcessor'
    lowercase : Optional[Any] =('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
        """simple docstring"""
        lowerCamelCase_ =None
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`, with a deprecation warning.
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', lowerCAmelCase, )
            lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
        lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(lowerCAmelCase, lowerCAmelCase )

    def __call__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
        """simple docstring"""
        # Tokenize text and/or preprocess images; at least one must be given.
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )
        if images is not None:
            lowerCamelCase_ =self.image_processor(lowerCAmelCase, return_tensors=lowerCAmelCase, **lowerCAmelCase )
        # When both are given, attach pixel_values to the text encoding.
        if text is not None and images is not None:
            lowerCamelCase_ =image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**lowerCAmelCase ), tensor_type=lowerCAmelCase )

    def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )

    def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )

    @property
    def lowercase__ ( self ):
        """simple docstring"""
        # Union of tokenizer and image-processor input names, de-duplicated
        # while preserving order.
        lowerCamelCase_ =self.tokenizer.model_input_names
        lowerCamelCase_ =self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
676
1
'''simple docstring'''
# Community pipeline: segment a region described by text (CLIPSeg) and
# inpaint it with Stable Diffusion.
# NOTE(review): identifiers look machine-obfuscated — parameters repeat
# `lowerCAmelCase` (not valid Python) and distinct locals/attributes were
# collapsed onto `lowerCamelCase_`; comments below rely on the names the
# bodies still reference (scheduler, text, image, outputs, ...).
from typing import Callable, List, Optional, Union

import PIL
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPSegForImageSegmentation,
    CLIPSegProcessor,
    CLIPTextModel,
    CLIPTokenizer,
)

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging

a_ : Union[str, Any] = logging.get_logger(__name__)  # pylint: disable=invalid-name


class __UpperCamelCase ( lowerCamelCase__ ):
    def __init__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, ):
        """simple docstring"""
        super().__init__()
        # Back-compat: older scheduler configs shipped steps_offset != 1;
        # emit a deprecation warning and patch the frozen config.
        if hasattr(scheduler.config, '''steps_offset''' ) and scheduler.config.steps_offset != 1:
            lowerCamelCase_ =(
                f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
                f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
                '''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
                ''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
                ''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
                ''' file''' )
            deprecate('''steps_offset!=1''', '''1.0.0''', lowerCAmelCase, standard_warn=lowerCAmelCase )
            lowerCamelCase_ =dict(scheduler.config )
            lowerCamelCase_ =1
            lowerCamelCase_ =FrozenDict(lowerCAmelCase )
        # Back-compat: ensure skip_prk_steps is enabled in the scheduler config.
        if hasattr(scheduler.config, '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
            lowerCamelCase_ =(
                f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
                ''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
                ''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
                ''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
                ''' Hub, it would be very nice if you could open a Pull request for the'''
                ''' `scheduler/scheduler_config.json` file''' )
            deprecate('''skip_prk_steps not set''', '''1.0.0''', lowerCAmelCase, standard_warn=lowerCAmelCase )
            lowerCamelCase_ =dict(scheduler.config )
            lowerCamelCase_ =True
            lowerCamelCase_ =FrozenDict(lowerCAmelCase )
        # Warn loudly when the safety checker was explicitly disabled.
        if safety_checker is None:
            logger.warning(
                f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
                ''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
                ''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
                ''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
                ''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
                ''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            segmentation_model=lowerCAmelCase, segmentation_processor=lowerCAmelCase, vae=lowerCAmelCase, text_encoder=lowerCAmelCase, tokenizer=lowerCAmelCase, unet=lowerCAmelCase, scheduler=lowerCAmelCase, safety_checker=lowerCAmelCase, feature_extractor=lowerCAmelCase, )

    def lowercase__ ( self, lowerCAmelCase = "auto" ):
        """simple docstring"""
        # Enable sliced attention; "auto" uses half the attention head dim.
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            lowerCamelCase_ =self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(lowerCAmelCase )

    def lowercase__ ( self ):
        """simple docstring"""
        # Presumably passes None through the setter above to disable slicing
        # (obfuscated argument) — TODO confirm against upstream.
        self.enable_attention_slicing(lowerCAmelCase )

    def lowercase__ ( self ):
        """simple docstring"""
        # Offload sub-models to CPU via accelerate to reduce GPU memory use.
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        lowerCamelCase_ =torch.device('''cuda''' )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(lowerCAmelCase, lowerCAmelCase )

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowercase__ ( self ):
        """simple docstring"""
        # Resolve the device hooks placed by accelerate, falling back to
        # the pipeline's own device.
        if self.device != torch.device('''meta''' ) or not hasattr(self.unet, '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(lowerCAmelCase, '''_hf_hook''' )
                and hasattr(module._hf_hook, '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    def __call__( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = 512, lowerCAmelCase = 512, lowerCAmelCase = 50, lowerCAmelCase = 7.5, lowerCAmelCase = None, lowerCAmelCase = 1, lowerCAmelCase = 0.0, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = "pil", lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = 1, **lowerCAmelCase, ):
        """simple docstring"""
        # 1) Segment the region described by `text` with CLIPSeg to get a mask.
        lowerCamelCase_ =self.segmentation_processor(
            text=[text], images=[image], padding='''max_length''', return_tensors='''pt''' ).to(self.device )
        lowerCamelCase_ =self.segmentation_model(**lowerCAmelCase )
        lowerCamelCase_ =torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        lowerCamelCase_ =self.numpy_to_pil(lowerCAmelCase )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        lowerCamelCase_ =StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor, )
        return inpainting_pipeline(
            prompt=lowerCAmelCase, image=lowerCAmelCase, mask_image=lowerCAmelCase, height=lowerCAmelCase, width=lowerCAmelCase, num_inference_steps=lowerCAmelCase, guidance_scale=lowerCAmelCase, negative_prompt=lowerCAmelCase, num_images_per_prompt=lowerCAmelCase, eta=lowerCAmelCase, generator=lowerCAmelCase, latents=lowerCAmelCase, output_type=lowerCAmelCase, return_dict=lowerCAmelCase, callback=lowerCAmelCase, callback_steps=lowerCAmelCase, )
676
'''simple docstring'''
# Image-to-text pipeline: generates a caption for an input image, optionally
# conditioned on a text prompt (model-type specific handling).
# NOTE(review): identifiers look machine-obfuscated — parameters repeat
# `lowerCAmelCase` (not valid Python) and distinct locals were collapsed
# onto `lowerCamelCase_`; the bodies still reference the intended names
# (prompt, generate_kwargs, model_inputs, ...).
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

a_ : Optional[Any] = logging.get_logger(__name__)


@add_end_docstrings(lowerCamelCase__ )
class __UpperCamelCase ( lowerCamelCase__ ):
    def __init__( self, *lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        super().__init__(*lowerCAmelCase, **lowerCAmelCase )
        # Captioning requires the vision backend (PIL).
        requires_backends(self, '''vision''' )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == '''tf''' else MODEL_FOR_VISION_2_SEQ_MAPPING )

    def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None ):
        """simple docstring"""
        # Split call kwargs into preprocess / forward parameter dicts;
        # `max_new_tokens` may be given directly or inside generate_kwargs,
        # but not both.
        lowerCamelCase_ ={}
        lowerCamelCase_ ={}
        if prompt is not None:
            lowerCamelCase_ =prompt
        if generate_kwargs is not None:
            lowerCamelCase_ =generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                lowerCamelCase_ ={}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '''\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'''
                    ''' please use only one''' )
            lowerCamelCase_ =max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__( self, lowerCAmelCase, **lowerCAmelCase ):
        """simple docstring"""
        # Delegate to the Pipeline base-class call machinery.
        return super().__call__(lowerCAmelCase, **lowerCAmelCase )

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
        """simple docstring"""
        # Preprocess: load the image and build model-type-specific inputs.
        lowerCamelCase_ =load_image(lowerCAmelCase )
        if prompt is not None:
            if not isinstance(lowerCAmelCase, lowerCAmelCase ):
                raise ValueError(
                    f'''Received an invalid text input, got - {type(lowerCAmelCase )} - but expected a single string. '''
                    '''Note also that one single text can be provided for conditional image to text generation.''' )
            lowerCamelCase_ =self.model.config.model_type
            if model_type == "git":
                # GIT: prepend the CLS token to the tokenized prompt.
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
                lowerCamelCase_ =self.tokenizer(text=lowerCAmelCase, add_special_tokens=lowerCAmelCase ).input_ids
                lowerCamelCase_ =[self.tokenizer.cls_token_id] + input_ids
                lowerCamelCase_ =torch.tensor(lowerCAmelCase ).unsqueeze(0 )
                model_inputs.update({'''input_ids''': input_ids} )
            elif model_type == "pix2struct":
                # Pix2Struct renders the prompt as header text on the image.
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, header_text=lowerCAmelCase, return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
                lowerCamelCase_ =self.tokenizer(lowerCAmelCase, return_tensors=self.framework )
                model_inputs.update(lowerCAmelCase )
            else:
                raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
        else:
            lowerCamelCase_ =self.image_processor(images=lowerCAmelCase, return_tensors=self.framework )
        # GIT without a prompt expects input_ids=None.
        if self.model.config.model_type == "git" and prompt is None:
            lowerCamelCase_ =None
        return model_inputs

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=None ):
        """simple docstring"""
        # Forward: run `generate` on the prepared model inputs; normalize a
        # list of None input_ids (unconditional batch) to a single None.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['''input_ids'''], lowerCAmelCase )
            and all(x is None for x in model_inputs['''input_ids'''] )
        ):
            lowerCamelCase_ =None
        if generate_kwargs is None:
            lowerCamelCase_ ={}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        lowerCamelCase_ =model_inputs.pop(self.model.main_input_name )
        lowerCamelCase_ =self.model.generate(lowerCAmelCase, **lowerCAmelCase, **lowerCAmelCase )
        return model_outputs

    def lowercase__ ( self, lowerCAmelCase ):
        """simple docstring"""
        # Postprocess: decode each generated sequence into a text record.
        lowerCamelCase_ =[]
        for output_ids in model_outputs:
            lowerCamelCase_ ={
                '''generated_text''': self.tokenizer.decode(
                    lowerCAmelCase, skip_special_tokens=lowerCAmelCase, )
            }
            records.append(lowerCAmelCase )
        return records
676
1
'''simple docstring'''
# Fast tests for the IF img2img super-resolution pipeline.
# NOTE(review): identifiers look machine-obfuscated — every test method is
# named `lowercase__` (later defs shadow earlier ones, so only the last
# would survive at runtime); confirm against the upstream diffusers tests.
import random
import unittest

import torch

from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
    # Pipeline under test and its parameter sets for the shared mixins.
    lowercase : Tuple =IFImgaImgSuperResolutionPipeline
    lowercase : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'width', 'height'}
    lowercase : Optional[Any] =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'original_image'} )
    lowercase : int =PipelineTesterMixin.required_optional_params - {'latents'}

    def lowercase__ ( self ):
        """simple docstring"""
        # Reuse the shared dummy components for super-resolution pipelines.
        return self._get_superresolution_dummy_components()

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
        """simple docstring"""
        # Build deterministic dummy inputs for the given device and seed;
        # mps has no device-specific Generator, so seed globally there.
        if str(lowerCAmelCase ).startswith('''mps''' ):
            lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
        else:
            lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
        lowerCamelCase_ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
        lowerCamelCase_ =floats_tensor((1, 3, 16, 16), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
        lowerCamelCase_ ={
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(),
        reason='''XFormers attention is only available with CUDA and `xformers` installed''',
    )
    def lowercase__ ( self ):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )

    def lowercase__ ( self ):
        """simple docstring"""
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''', reason='''float16 requires CUDA''' )
    def lowercase__ ( self ):
        """simple docstring"""
        super().test_save_load_floataa(expected_max_diff=1e-1 )

    def lowercase__ ( self ):
        """simple docstring"""
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def lowercase__ ( self ):
        """simple docstring"""
        self._test_save_load_local()

    def lowercase__ ( self ):
        """simple docstring"""
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
676
'''Convert a TensorFlow BERT checkpoint to a PyTorch `BertForPreTraining` state dict.'''
import argparse

import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    """Build a BERT model from `bert_config_file`, load TF weights, and save the state dict.

    FIX(review): the obfuscated original defined this function as `a_` with
    all three parameters named `__snake_case` (a SyntaxError; the later name
    also shadowed the earlier ones) while the __main__ block called
    `convert_tf_checkpoint_to_pytorch`. Names are restored from the call
    site, the argparse flags, and the f-string referencing
    `pytorch_dump_path` in the body. The argument order of
    `load_tf_weights_in_bert(model, config, tf_checkpoint_path)` follows the
    upstream transformers API — confirm against the installed version.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        bert_config_file: JSON config describing the model architecture.
        pytorch_dump_path: Where to write the PyTorch state dict.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--bert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained BERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
676
1
"""Public entry point for the ControlNet pipelines.

Re-exports the PyTorch ControlNet pipelines when `torch` and `transformers`
are installed, and the Flax pipeline when `flax` is additionally available.
If the required backends are missing, dummy placeholder objects are exported
instead so that attribute access fails with a helpful error message.
"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)

try:
    # Both torch and transformers are hard requirements for these pipelines.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Missing backend: export dummy objects that raise on use.
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


# The Flax pipeline is only exported when flax is installed as well.
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
676
"""AltCLIP model configuration (text, vision, and combined configs).

Reconstructed from machine-mangled source: the original collapsed every
attribute assignment to a throwaway local (`lowerCamelCase_ = ...`) and
renamed the classes, while the code body still referenced the real names
(`AltCLIPTextConfig`, `text_config_dict`, ...), so it could not run.
"""
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}


class AltCLIPTextConfig(PretrainedConfig):
    """Configuration for the AltCLIP text encoder (XLM-R style)."""

    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250_002,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        # Dimension of the projection from the text encoder into CLIP space.
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    """Configuration for the AltCLIP vision encoder (CLIP-ViT style)."""

    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load a vision config, unwrapping it from a combined AltCLIP config if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    """Combined text + vision configuration for the full AltCLIP model."""

    model_type = "altclip"
    is_composition = True

    def __init__(
        self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        """Instantiate a combined config from separate text and vision configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this instance to a plain dict, expanding the nested configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
676
1
"""VQ-VAE model (`VQModel`) — encoder, vector quantizer, and decoder.

Reconstructed from machine-mangled source: the original collapsed every
attribute assignment to `lowerCamelCase_ = ...` while the methods still read
`self.encoder`, `self.quantize`, etc., and used the non-existent
`nn.Convad` for `nn.Conv2d`, so it could not run.
"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer


@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of `VQModel.encode`.

    Attributes:
        latents: Continuous (pre-quantization) latent representations.
    """

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    """A VQ-VAE: encodes images into discrete latents and decodes them back.

    Args:
        in_channels: Number of channels in the input image.
        out_channels: Number of channels in the reconstructed output.
        down_block_types / up_block_types: Encoder / decoder block class names.
        block_out_channels: Output channels per block.
        layers_per_block: ResNet layers per block.
        act_fn: Activation function name.
        latent_channels: Channels of the latent space.
        sample_size: Nominal input sample size (stored via config).
        num_vq_embeddings: Codebook size of the vector quantizer.
        norm_num_groups: Groups for group normalization.
        vq_embed_dim: Codebook embedding dim; defaults to ``latent_channels``.
        scaling_factor: Latent scaling factor (stored via config).
        norm_type: "group" or "spatial" decoder normalization.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder; double_z=False because VQ latents are
        # deterministic (no mean/logvar split as in a KL-VAE).
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        """Encode an image batch into continuous latents (not yet quantized)."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """Quantize latents (unless `force_not_quantize`) and decode to an image."""
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant_post = self.post_quant_conv(quant)
        # "spatial" norm conditions the decoder on the quantized latents.
        dec = self.decoder(quant_post, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Full autoencoding pass: encode, quantize, decode."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
676
"""PyTorch Flaubert model tests.

Reconstructed from machine-mangled source: every attribute assignment had been
collapsed to `lowerCamelCase_ = ...` and every method renamed `lowercase__`,
while the bodies still called the real names (`self.get_config()`,
`self.model_tester.create_and_check_flaubert_model`, `FlaubertModelTester(self)`),
so the module could not run as written.
"""
import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class FlaubertModelTester(object):
    """Builds tiny Flaubert configs/inputs and runs per-head shape checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels and a matching tiny config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )

        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )

        (total_loss,) = result_with_labels.to_tuple()

        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)

        (total_loss,) = result_with_labels.to_tuple()

        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # TODO: Fix the failed tests
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
676
1