code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' from manim import * class lowercase_ ( __snake_case ): """simple docstring""" def lowerCAmelCase_ ( self : Optional[int] ): """simple docstring""" _SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 ) _SCREAMING_SNAKE_CASE = Rectangle(height=0.2_5 , width=0.2_5 ) _SCREAMING_SNAKE_CASE = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) _SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] _SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] _SCREAMING_SNAKE_CASE = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 ) _SCREAMING_SNAKE_CASE = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 ) _SCREAMING_SNAKE_CASE = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 ) _SCREAMING_SNAKE_CASE = Text("CPU" , font_size=2_4 ) _SCREAMING_SNAKE_CASE = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowercase_ ) _SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )] _SCREAMING_SNAKE_CASE = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 ) _SCREAMING_SNAKE_CASE = Text("GPU" , font_size=2_4 ) _SCREAMING_SNAKE_CASE = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ ) gpu.move_to([-1, -1, 0] ) self.add(lowercase_ ) _SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] _SCREAMING_SNAKE_CASE = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 ) _SCREAMING_SNAKE_CASE = Text("Model" , font_size=2_4 ) _SCREAMING_SNAKE_CASE = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ ) model.move_to([3, -1.0, 0] ) self.add(lowercase_ ) _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] for i, rect in enumerate(lowercase_ ): rect.set_stroke(lowercase_ ) _SCREAMING_SNAKE_CASE = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , 
buff=0.0_2 , direction=lowercase_ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=lowercase_ , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowercase_ , buff=0.0 ) self.add(lowercase_ ) model_cpu_arr.append(lowercase_ ) self.add(*lowercase_ , *lowercase_ , *lowercase_ ) _SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] _SCREAMING_SNAKE_CASE = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 ) _SCREAMING_SNAKE_CASE = Text("Loaded Checkpoint" , font_size=2_4 ) _SCREAMING_SNAKE_CASE = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ ) checkpoint.move_to([3, 0.5, 0] ) self.add(lowercase_ ) _SCREAMING_SNAKE_CASE = [] _SCREAMING_SNAKE_CASE = [] for i, rect in enumerate(lowercase_ ): _SCREAMING_SNAKE_CASE = fill.copy().set_fill(lowercase_ , opacity=0.7 ) target.move_to(lowercase_ ) ckpt_arr.append(lowercase_ ) _SCREAMING_SNAKE_CASE = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(lowercase_ ) self.add(*lowercase_ , *lowercase_ ) _SCREAMING_SNAKE_CASE = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _SCREAMING_SNAKE_CASE = MarkupText( F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowercase_ , lowercase_ ) _SCREAMING_SNAKE_CASE = MarkupText( F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=1_8 , ) blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(lowercase_ ) _SCREAMING_SNAKE_CASE = MarkupText( F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=2_4 , ) step_a.move_to([2, 2, 0] ) _SCREAMING_SNAKE_CASE = [meta_mem.copy() for i in range(6 )] _SCREAMING_SNAKE_CASE = [meta_mem.copy() for i in range(6 )] _SCREAMING_SNAKE_CASE = 
VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 ) _SCREAMING_SNAKE_CASE = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 ) _SCREAMING_SNAKE_CASE = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 ) _SCREAMING_SNAKE_CASE = Text("Disk" , font_size=2_4 ) _SCREAMING_SNAKE_CASE = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ ) disk.move_to([-4.0, -1.2_5, 0] ) self.play(Write(lowercase_ , run_time=3 ) , Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) ) _SCREAMING_SNAKE_CASE = [] for i, rect in enumerate(lowercase_ ): _SCREAMING_SNAKE_CASE = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) ) self.play(*lowercase_ ) self.play(FadeOut(lowercase_ ) ) _SCREAMING_SNAKE_CASE = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=2_4 ) step_a.move_to([2, 2, 0] ) self.play(Write(lowercase_ , run_time=3 ) ) self.play( FadeOut(lowercase_ , lowercase_ , *lowercase_ , *lowercase_ ) , ) self.wait()
418
import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants __SCREAMING_SNAKE_CASE : List[str] = Mapping[str, np.ndarray] __SCREAMING_SNAKE_CASE : List[Any] = Mapping[str, Any] # Is a nested dict. __SCREAMING_SNAKE_CASE : List[Any] = 0.01 @dataclasses.dataclass(frozen=__snake_case ) class lowercase_ : _lowerCamelCase = 42 # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. _lowerCamelCase = 42 # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. _lowerCamelCase = 42 # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. _lowerCamelCase = 42 # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. _lowerCamelCase = 42 # [num_res, num_atom_type] # Chain indices for multi-chain predictions _lowerCamelCase = None # Optional remark about the protein. 
Included as a comment in output PDB # files _lowerCamelCase = None # Templates used to generate this protein (prediction-only) _lowerCamelCase = None # Chain corresponding to each parent _lowerCamelCase = None def snake_case (__lowercase ) -> Protein: '''simple docstring''' _snake_case : str = r"(\[[A-Z]+\]\n)" _snake_case : List[str] = [tag.strip() for tag in re.split(__lowercase , __lowercase ) if len(__lowercase ) > 0] _snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] ) _snake_case : List[str] = ["N", "CA", "C"] _snake_case : Any = None _snake_case : Union[str, Any] = None _snake_case : Optional[int] = None for g in groups: if "[PRIMARY]" == g[0]: _snake_case : Tuple = g[1][0].strip() for i in range(len(__lowercase ) ): if seq[i] not in residue_constants.restypes: _snake_case : Tuple = "X" # FIXME: strings are immutable _snake_case : int = np.array( [residue_constants.restype_order.get(__lowercase , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: _snake_case : List[List[float]] = [] for axis in range(3 ): tertiary.append(list(map(__lowercase , g[1][axis].split() ) ) ) _snake_case : Dict = np.array(__lowercase ) _snake_case : Dict = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(__lowercase ): _snake_case : List[Any] = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: _snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) ) _snake_case : Any = np.zeros( ( len(__lowercase ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(__lowercase ): _snake_case : Dict = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=__lowercase , atom_mask=__lowercase , aatype=__lowercase , residue_index=np.arange(len(__lowercase ) ) , b_factors=__lowercase , ) def snake_case (__lowercase , 
__lowercase = 0 ) -> List[str]: '''simple docstring''' _snake_case : List[str] = [] _snake_case : Optional[Any] = prot.remark if remark is not None: pdb_headers.append(F"""REMARK {remark}""" ) _snake_case : str = prot.parents _snake_case : str = prot.parents_chain_index if parents is not None and parents_chain_index is not None: _snake_case : int = [p for i, p in zip(__lowercase , __lowercase ) if i == chain_id] if parents is None or len(__lowercase ) == 0: _snake_case : Optional[int] = ["N/A"] pdb_headers.append(F"""PARENT {' '.join(__lowercase )}""" ) return pdb_headers def snake_case (__lowercase , __lowercase ) -> str: '''simple docstring''' _snake_case : List[str] = [] _snake_case : Optional[int] = pdb_str.split("\n" ) _snake_case : List[str] = prot.remark if remark is not None: out_pdb_lines.append(F"""REMARK {remark}""" ) _snake_case : List[List[str]] if prot.parents is not None and len(prot.parents ) > 0: _snake_case : str = [] if prot.parents_chain_index is not None: _snake_case : Dict[str, List[str]] = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(__lowercase ) , [] ) parent_dict[str(__lowercase )].append(__lowercase ) _snake_case : Any = max([int(__lowercase ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): _snake_case : Tuple = parent_dict.get(str(__lowercase ) , ["N/A"] ) parents_per_chain.append(__lowercase ) else: parents_per_chain.append(list(prot.parents ) ) else: _snake_case : List[str] = [["N/A"]] def make_parent_line(__lowercase ) -> str: return F"""PARENT {' '.join(__lowercase )}""" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) _snake_case : int = 0 for i, l in enumerate(__lowercase ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(__lowercase ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(__lowercase ): _snake_case : Tuple = parents_per_chain[chain_counter] else: _snake_case : str = ["N/A"] 
out_pdb_lines.append(make_parent_line(__lowercase ) ) return "\n".join(__lowercase ) def snake_case (__lowercase ) -> str: '''simple docstring''' _snake_case : Optional[Any] = residue_constants.restypes + ["X"] def res_atoa(__lowercase ) -> str: return residue_constants.restype_atoa.get(restypes[r] , "UNK" ) _snake_case : Optional[int] = residue_constants.atom_types _snake_case : List[str] = [] _snake_case : Tuple = prot.atom_mask _snake_case : List[str] = prot.aatype _snake_case : int = prot.atom_positions _snake_case : int = prot.residue_index.astype(np.intaa ) _snake_case : List[Any] = prot.b_factors _snake_case : str = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError("Invalid aatypes." ) _snake_case : Union[str, Any] = get_pdb_headers(__lowercase ) if len(__lowercase ) > 0: pdb_lines.extend(__lowercase ) _snake_case : Optional[Any] = aatype.shape[0] _snake_case : str = 1 _snake_case : Tuple = 0 _snake_case : int = string.ascii_uppercase _snake_case : Optional[Any] = None # Add all atom sites. for i in range(__lowercase ): _snake_case : Dict = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(__lowercase , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue _snake_case : List[Any] = "ATOM" _snake_case : Union[str, Any] = atom_name if len(__lowercase ) == 4 else F""" {atom_name}""" _snake_case : str = "" _snake_case : str = "" _snake_case : Any = 1.00 _snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works. _snake_case : Dict = "" _snake_case : Any = "A" if chain_index is not None: _snake_case : List[Any] = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! 
_snake_case : Optional[int] = ( F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" F"""{res_name_a:>3} {chain_tag:>1}""" F"""{residue_index[i]:>4}{insertion_code:>1} """ F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" F"""{occupancy:>6.2f}{b_factor:>6.2f} """ F"""{element:>2}{charge:>2}""" ) pdb_lines.append(__lowercase ) atom_index += 1 _snake_case : Dict = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: _snake_case : Optional[int] = True _snake_case : Union[str, Any] = chain_index[i + 1] if should_terminate: # Close the chain. _snake_case : List[str] = "TER" _snake_case : str = ( F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(__lowercase ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(__lowercase , __lowercase ) ) pdb_lines.append("END" ) pdb_lines.append("" ) return "\n".join(__lowercase ) def snake_case (__lowercase ) -> np.ndarray: '''simple docstring''' return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def snake_case (__lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , ) -> Protein: '''simple docstring''' return Protein( aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__lowercase , remark=__lowercase , parents=__lowercase , parents_chain_index=__lowercase , )
670
0
from typing import Any class lowercase : """simple docstring""" def __init__( self : List[str] , __UpperCAmelCase : int ) -> int: UpperCAmelCase_= data UpperCAmelCase_= None class lowercase : """simple docstring""" def __init__( self : Dict ) -> int: UpperCAmelCase_= None def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: UpperCAmelCase_= self.head while temp is not None: print(temp.data , end=""" """ ) UpperCAmelCase_= temp.next print() def _SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : List[Any] ) -> Dict: UpperCAmelCase_= Node(lowercase_ ) UpperCAmelCase_= self.head UpperCAmelCase_= new_node def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any] ) -> List[str]: if node_data_a == node_data_a: return else: UpperCAmelCase_= self.head while node_a is not None and node_a.data != node_data_a: UpperCAmelCase_= node_a.next UpperCAmelCase_= self.head while node_a is not None and node_a.data != node_data_a: UpperCAmelCase_= node_a.next if node_a is None or node_a is None: return UpperCAmelCase_= node_a.data, node_a.data if __name__ == "__main__": __A = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print('''After swapping''') ll.print_list()
593
from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class lowercase_ ( __snake_case ): _lowerCamelCase = ['image_processor'] _lowerCamelCase = 'SamImageProcessor' def __init__( self , lowercase_ ): super().__init__(lowercase_ ) _snake_case : Optional[Any] = self.image_processor _snake_case : Tuple = -10 _snake_case : str = self.image_processor.size["longest_edge"] def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = None , **lowercase_ , ): _snake_case : List[Any] = self.image_processor( lowercase_ , return_tensors=lowercase_ , **lowercase_ , ) # pop arguments that are not used in the foward but used nevertheless _snake_case : Any = encoding_image_processor["original_sizes"] if hasattr(lowercase_ , "numpy" ): # Checks if Torch or TF tensor _snake_case : int = original_sizes.numpy() _snake_case ,_snake_case ,_snake_case : Union[str, Any] = self._check_and_preprocess_points( input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , ) _snake_case : Dict = self._normalize_and_convert( lowercase_ , lowercase_ , input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , return_tensors=lowercase_ , ) return encoding_image_processor def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="pt" , ): if input_points is not None: if len(lowercase_ ) != len(lowercase_ ): _snake_case : int = [ self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] ) for point in input_points ] else: _snake_case : Dict = [ self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ ) for point, original_size in zip(lowercase_ , 
lowercase_ ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: _snake_case ,_snake_case : int = self._pad_points_and_labels(lowercase_ , lowercase_ ) _snake_case : Any = np.array(lowercase_ ) if input_labels is not None: _snake_case : Optional[Any] = np.array(lowercase_ ) if input_boxes is not None: if len(lowercase_ ) != len(lowercase_ ): _snake_case : Optional[Any] = [ self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] , is_bounding_box=lowercase_ ) for box in input_boxes ] else: _snake_case : List[str] = [ self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ , is_bounding_box=lowercase_ ) for box, original_size in zip(lowercase_ , lowercase_ ) ] _snake_case : Tuple = np.array(lowercase_ ) if input_boxes is not None: if return_tensors == "pt": _snake_case : List[str] = torch.from_numpy(lowercase_ ) # boxes batch size of 1 by default _snake_case : Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": _snake_case : List[str] = tf.convert_to_tensor(lowercase_ ) # boxes batch size of 1 by default _snake_case : Optional[int] = tf.expand_dims(lowercase_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({"input_boxes": input_boxes} ) if input_points is not None: if return_tensors == "pt": _snake_case : Tuple = torch.from_numpy(lowercase_ ) # point batch size of 1 by default _snake_case : int = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points elif return_tensors == "tf": _snake_case : List[str] = tf.convert_to_tensor(lowercase_ ) # point batch size of 1 by default _snake_case : Tuple = tf.expand_dims(lowercase_ , 1 ) if len(input_points.shape ) != 4 else input_points encoding_image_processor.update({"input_points": input_points} ) if input_labels is not None: if return_tensors == "pt": _snake_case 
: Dict = torch.from_numpy(lowercase_ ) # point batch size of 1 by default _snake_case : str = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": _snake_case : Optional[Any] = tf.convert_to_tensor(lowercase_ ) # point batch size of 1 by default _snake_case : List[Any] = tf.expand_dims(lowercase_ , 1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({"input_labels": input_labels} ) return encoding_image_processor def UpperCamelCase ( self , lowercase_ , lowercase_ ): _snake_case : List[Any] = max([point.shape[0] for point in input_points] ) _snake_case : List[str] = [] for i, point in enumerate(lowercase_ ): if point.shape[0] != expected_nb_points: _snake_case : Optional[Any] = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 ) _snake_case : Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] ) processed_input_points.append(lowercase_ ) _snake_case : Optional[Any] = processed_input_points return input_points, input_labels def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False ): _snake_case ,_snake_case : Optional[int] = original_size _snake_case ,_snake_case : List[str] = self.image_processor._get_preprocess_shape(lowercase_ , longest_edge=lowercase_ ) _snake_case : Optional[Any] = deepcopy(lowercase_ ).astype(lowercase_ ) if is_bounding_box: _snake_case : str = coords.reshape(-1 , 2 , 2 ) _snake_case : Optional[Any] = coords[..., 0] * (new_w / old_w) _snake_case : Dict = coords[..., 1] * (new_h / old_h) if is_bounding_box: _snake_case : Optional[Any] = coords.reshape(-1 , 4 ) return coords def UpperCamelCase ( self , lowercase_=None , lowercase_=None , lowercase_=None , ): if input_points is not None: if hasattr(lowercase_ , "numpy" ): # Checks for TF or Torch tensor _snake_case : Union[str, Any] = input_points.numpy().tolist() if not isinstance(lowercase_ , lowercase_ ) 
or not isinstance(input_points[0] , lowercase_ ): raise ValueError("Input points must be a list of list of floating points." ) _snake_case : Any = [np.array(lowercase_ ) for input_point in input_points] else: _snake_case : Optional[int] = None if input_labels is not None: if hasattr(lowercase_ , "numpy" ): _snake_case : Tuple = input_labels.numpy().tolist() if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_labels[0] , lowercase_ ): raise ValueError("Input labels must be a list of list integers." ) _snake_case : Tuple = [np.array(lowercase_ ) for label in input_labels] else: _snake_case : Optional[Any] = None if input_boxes is not None: if hasattr(lowercase_ , "numpy" ): _snake_case : List[str] = input_boxes.numpy().tolist() if ( not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_boxes[0] , lowercase_ ) or not isinstance(input_boxes[0][0] , lowercase_ ) ): raise ValueError("Input boxes must be a list of list of list of floating points." ) _snake_case : List[Any] = [np.array(lowercase_ ).astype(np.floataa ) for box in input_boxes] else: _snake_case : Optional[int] = None return input_points, input_labels, input_boxes @property def UpperCamelCase ( self ): _snake_case : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(lowercase_ ) ) def UpperCamelCase ( self , *lowercase_ , **lowercase_ ): return self.image_processor.post_process_masks(*lowercase_ , **lowercase_ )
670
0
import math def UpperCAmelCase ( )-> None: '''simple docstring''' SCREAMING_SNAKE_CASE_ = input('''Enter message: ''' ) SCREAMING_SNAKE_CASE_ = int(input(f'''Enter key [2-{len(__lowercase ) - 1}]: ''' ) ) SCREAMING_SNAKE_CASE_ = input('''Encryption/Decryption [e/d]: ''' ) if mode.lower().startswith('''e''' ): SCREAMING_SNAKE_CASE_ = encrypt_message(__lowercase ,__lowercase ) elif mode.lower().startswith('''d''' ): SCREAMING_SNAKE_CASE_ = decrypt_message(__lowercase ,__lowercase ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(f'''Output:\n{text + '|'}''' ) def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase )-> str: '''simple docstring''' SCREAMING_SNAKE_CASE_ = [""] * key for col in range(__lowercase ): SCREAMING_SNAKE_CASE_ = col while pointer < len(__lowercase ): cipher_text[col] += message[pointer] pointer += key return "".join(__lowercase ) def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase )-> str: '''simple docstring''' SCREAMING_SNAKE_CASE_ = math.ceil(len(__lowercase ) / key ) SCREAMING_SNAKE_CASE_ = key SCREAMING_SNAKE_CASE_ = (num_cols * num_rows) - len(__lowercase ) SCREAMING_SNAKE_CASE_ = [""] * num_cols SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): SCREAMING_SNAKE_CASE_ = 0 row += 1 return "".join(__lowercase ) if __name__ == "__main__": import doctest doctest.testmod() main()
393
def snake_case (__lowercase ) -> int: '''simple docstring''' if not grid or not grid[0]: raise TypeError("The grid does not contain the appropriate information" ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] _snake_case : Union[str, Any] = grid[0] for row_n in range(1 , len(__lowercase ) ): _snake_case : Union[str, Any] = grid[row_n] _snake_case : List[Any] = fill_row(__lowercase , __lowercase ) _snake_case : List[Any] = grid[row_n] return grid[-1][-1] def snake_case (__lowercase , __lowercase ) -> list: '''simple docstring''' current_row[0] += row_above[0] for cell_n in range(1 , len(__lowercase ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
670
0
'''simple docstring''' import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument _A: Union[str, Any] = { '/attention/': '/0/SelfAttention/', '/self_attention/': '/0/SelfAttention/', '/encoder_decoder_attention/': '/1/EncDecAttention/', 'value': 'v', 'query': 'q', 'key': 'k', 'out': 'o', 'pre_self_attention_layer_norm': '0/layer_norm', 'pre_cross_attention_layer_norm': '1/layer_norm', 'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong 'token_embedder': 'shared', 'encoder_norm': 'final_layer_norm', 'decoder_norm': 'final_layer_norm', 'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight', 'router/router_weights/w/': 'router/classifier/', 'roer/roer_weights/w/': 'router/classifier/', 'logits_dense': 'lm_head', } def _lowerCAmelCase ( _lowerCAmelCase )-> Tuple: __UpperCAmelCase = list(s_dict.keys() ) for key in keys: __UpperCAmelCase = r".*/layers_(\d+)" __UpperCAmelCase = key if re.match(__lowercase , __lowercase ): __UpperCAmelCase = re.sub(r'layers_(\d+)' , r'block/\1/layer' , __lowercase ) __UpperCAmelCase = r"(encoder|decoder)\/" if re.match(__lowercase , __lowercase ): __UpperCAmelCase = re.match(__lowercase , __lowercase ).groups() if groups[0] == "encoder": __UpperCAmelCase = re.sub(r'/mlp/' , r'/1/mlp/' , __lowercase ) __UpperCAmelCase = re.sub(r'/pre_mlp_layer_norm/' , r'/1/layer_norm/' , __lowercase ) elif groups[0] == "decoder": __UpperCAmelCase = re.sub(r'/mlp/' , r'/2/mlp/' , __lowercase ) __UpperCAmelCase = re.sub(r'/pre_mlp_layer_norm/' , r'/2/layer_norm/' , __lowercase ) # 2. 
Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: __UpperCAmelCase = new_key.replace(__lowercase , __lowercase ) print(F'{key} -> {new_key}' ) __UpperCAmelCase = s_dict.pop(__lowercase ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: __UpperCAmelCase = s_dict[ "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: __UpperCAmelCase = s_dict[ "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" ].T # 3. Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: __UpperCAmelCase = s_dict[key].shape[0] __UpperCAmelCase = s_dict[key] for idx in range(__lowercase ): __UpperCAmelCase = expert_weihts[idx] print(F'{key} -> {key.replace("expert/" , "nested fstring" )}' ) s_dict.pop(__lowercase ) return s_dict _A: str = { 'NUM_ENCODER_LAYERS': 'num_layers', 'NUM_DECODER_LAYERS': 'num_decoder_layers', 'NUM_HEADS': 'num_heads', 'HEAD_DIM': 'd_kv', 'EMBED_DIM': 'd_model', 'MLP_DIM': 'd_ff', 'NUM_SELECTED_EXPERTS': 'num_selected_experts', 'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers', 'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers', 'dense.MlpBlock.activations': 'feed_forward_proj', } def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase )-> Any: import regex as re with open(__lowercase , 'r' ) as f: __UpperCAmelCase = f.read() __UpperCAmelCase = re.findall(r'(.*) = ([0-9.]*)' , __lowercase ) __UpperCAmelCase = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": __UpperCAmelCase = float(__lowercase ) if "." 
in value else int(__lowercase ) __UpperCAmelCase = re.findall(r'(.*activations) = \(\'(.*)\',\)' , __lowercase )[0] __UpperCAmelCase = str(activation[1] ) __UpperCAmelCase = num_experts __UpperCAmelCase = SwitchTransformersConfig(**__lowercase ) return config def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase="./" , _lowerCAmelCase=8 )-> str: print(F'Loading flax weights from : {flax_checkpoint_path}' ) __UpperCAmelCase = checkpoints.load_tax_checkpoint(__lowercase ) if gin_file is not None: __UpperCAmelCase = convert_gin_to_config(__lowercase , __lowercase ) else: __UpperCAmelCase = SwitchTransformersConfig.from_pretrained(__lowercase ) __UpperCAmelCase = SwitchTransformersForConditionalGeneration(__lowercase ) __UpperCAmelCase = flax_params["target"] __UpperCAmelCase = flatten_dict(__lowercase , sep='/' ) __UpperCAmelCase = rename_keys(__lowercase ) __UpperCAmelCase = unflatten_dict(__lowercase , sep='/' ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(__lowercase , __lowercase ) print(F'Save PyTorch model to {pytorch_dump_path}' ) pt_model.save_pretrained(__lowercase ) if __name__ == "__main__": _A: Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--switch_t5x_checkpoint_path""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the""" """ model architecture. If not provided, a `gin_file` has to be provided.""" ), ) parser.add_argument( """--gin_file""", default=None, type=str, required=False, help="""Path to the gin config file. 
If not provided, a `config_file` has to be passed """, ) parser.add_argument( """--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model.""" ) parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""") _A: int = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
126
import random def snake_case (__lowercase , __lowercase ) -> tuple: '''simple docstring''' _snake_case ,_snake_case ,_snake_case : List[Any] = [], [], [] for element in data: if element < pivot: less.append(__lowercase ) elif element > pivot: greater.append(__lowercase ) else: equal.append(__lowercase ) return less, equal, greater def snake_case (__lowercase , __lowercase ) -> List[Any]: '''simple docstring''' if index >= len(__lowercase ) or index < 0: return None _snake_case : Any = items[random.randint(0 , len(__lowercase ) - 1 )] _snake_case : Tuple = 0 _snake_case ,_snake_case ,_snake_case : Tuple = _partition(__lowercase , __lowercase ) _snake_case : Tuple = len(__lowercase ) _snake_case : List[str] = len(__lowercase ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(__lowercase , __lowercase ) # must be in larger else: return quick_select(__lowercase , index - (m + count) )
670
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { 'microsoft/trocr-base-handwritten': ( 'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json' ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class __magic_name__ ( __snake_case ): _SCREAMING_SNAKE_CASE : Optional[Any] = 'trocr' _SCREAMING_SNAKE_CASE : List[Any] = ['past_key_values'] _SCREAMING_SNAKE_CASE : str = { 'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model', 'num_hidden_layers': 'decoder_layers', } def __init__( self : List[str] , snake_case_ : List[Any]=50265 , snake_case_ : int=1024 , snake_case_ : List[str]=12 , snake_case_ : Tuple=16 , snake_case_ : int=4096 , snake_case_ : Tuple="gelu" , snake_case_ : Tuple=512 , snake_case_ : Tuple=0.1 , snake_case_ : Optional[Any]=0.0 , snake_case_ : Tuple=0.0 , snake_case_ : int=2 , snake_case_ : List[Any]=0.02 , snake_case_ : Dict=0.0 , snake_case_ : Optional[int]=True , snake_case_ : List[Any]=False , snake_case_ : List[str]=True , snake_case_ : Dict=True , snake_case_ : List[str]=1 , snake_case_ : str=0 , snake_case_ : Tuple=2 , **snake_case_ : Optional[int] , ): __snake_case = vocab_size __snake_case = d_model __snake_case = decoder_layers __snake_case = decoder_attention_heads __snake_case = decoder_ffn_dim __snake_case = activation_function __snake_case = max_position_embeddings __snake_case = dropout __snake_case = attention_dropout __snake_case = activation_dropout __snake_case = init_std __snake_case = decoder_layerdrop __snake_case = use_cache __snake_case = scale_embedding __snake_case = use_learned_position_embeddings __snake_case = layernorm_embedding super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
163
from math import pow, sqrt def snake_case (*__lowercase ) -> bool: '''simple docstring''' _snake_case : str = len(__lowercase ) > 0 and all(value > 0.0 for value in values ) return result def snake_case (__lowercase , __lowercase ) -> float | ValueError: '''simple docstring''' return ( round(sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(__lowercase , __lowercase ) else ValueError("Input Error: Molar mass values must greater than 0." ) ) def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError: '''simple docstring''' return ( round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(__lowercase , __lowercase , __lowercase ) else ValueError( "Input Error: Molar mass and effusion rate values must greater than 0." ) ) def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError: '''simple docstring''' return ( round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(__lowercase , __lowercase , __lowercase ) else ValueError( "Input Error: Molar mass and effusion rate values must greater than 0." ) ) def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError: '''simple docstring''' return ( round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 ) if validate(__lowercase , __lowercase , __lowercase ) else ValueError( "Input Error: Molar mass and effusion rate values must greater than 0." ) ) def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError: '''simple docstring''' return ( round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 ) if validate(__lowercase , __lowercase , __lowercase ) else ValueError( "Input Error: Molar mass and effusion rate values must greater than 0." ) )
670
0
from typing import TYPE_CHECKING

from ...utils import _LazyModule


# Lazily-exposed public names: submodule -> list of symbols.
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the tokenizer submodule is
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
569
import warnings

from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    """Deprecated alias of the v2 image processor, kept so old imports work."""

    def __init__(self, *args, **kwargs) -> None:
        # Warn once per call site; FutureWarning is the conventional
        # category for scheduled removals in this codebase.
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
670
0
import operator def lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ = False , UpperCAmelCase__ = None ): """simple docstring""" a_ = operator.lt if reverse else operator.gt a_ = solution or [] if not arr: return solution a_ = [arr.pop(0 )] for i, item in enumerate(__lowercase ): if _operator(__lowercase , sublist[-1] ): sublist.append(__lowercase ) arr.pop(__lowercase ) # merging sublist into solution list if not solution: solution.extend(__lowercase ) else: while sublist: a_ = sublist.pop(0 ) for i, xx in enumerate(__lowercase ): if not _operator(__lowercase , __lowercase ): solution.insert(__lowercase , __lowercase ) break else: solution.append(__lowercase ) strand_sort(__lowercase , __lowercase , __lowercase ) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
483
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    # Result of a forward Burrows-Wheeler transform.
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return every cyclic rotation of *s* (including *s* itself)."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Burrows-Wheeler transform of *s*.

    Returns the last column of the sorted rotation matrix plus the row
    index of the original string, which is needed to invert the
    transform.
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # The BWT string is the last character of each sorted rotation.
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert a Burrows-Wheeler transform.

    Rebuilds the sorted rotation matrix column by column, then reads the
    row recorded by the forward transform.
    """
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError as exc:
        # Chain the cause instead of silently replacing it.
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        ) from exc
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than"
            " len(bwt_string)."
        )
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        # Prepend the BWT column and re-sort; after len(bwt_string)
        # passes each row is a full rotation of the original string.
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
670
0
"""Sum-of-subsets: enumerate every subset of nums summing to max_sum."""
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    """Return all order-preserving subsets of *nums* whose sum is *max_sum*."""
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """DFS over the subset state space with two prunes: the running sum
    already overshot, or the remaining numbers cannot reach max_sum."""
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


# Guard the demo so importing this module has no side effects
# (the original ran it unconditionally at import time).
if __name__ == "__main__":
    nums = [3, 34, 4, 12, 5, 2]
    max_sum = 9
    result = generate_sum_of_subsets_soln(nums, max_sum)
    print(*result)
683
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( 'stable diffusion controlnet', '0.22.0', 'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.', standard_warn=False, stacklevel=3, )
670
0
"""simple docstring""" import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) A : List[str] = logging.getLogger() A : Dict = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _UpperCamelCase ( __snake_case ): '''simple docstring''' def snake_case ( self , __a ): os.makedirs(lowercase_ , exist_ok=lowercase_ ) __lowerCAmelCase = {"source": "What is love ?", "target": "life"} __lowerCAmelCase = {"train": 12, "val": 2, "test": 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: __lowerCAmelCase = "\n".join([contents[field]] * n_lines[split] ) with open(os.path.join(lowercase_ , f"{split}.{field}" ) , "w" ) as f: f.write(lowercase_ ) def snake_case ( self , __a , __a = "pytorch" ): __lowerCAmelCase = self.get_auto_remove_tmp_dir() __lowerCAmelCase = os.path.join(lowercase_ , "output" ) __lowerCAmelCase = os.path.join(lowercase_ , "data" ) self._create_dummy_data(data_dir=lowercase_ ) __lowerCAmelCase = f"\n --data_dir {data_dir} \\n --output_dir {output_dir} \\n --model_name_or_path facebook/rag-sequence-base \\n --model_type rag_sequence \\n --do_train \\n --do_predict \\n --n_val -1 \\n --val_check_interval 1.0 \\n --train_batch_size 2 \\n --eval_batch_size 1 \\n --max_source_length 25 \\n --max_target_length 25 \\n --val_max_target_length 25 \\n --test_max_target_length 25 \\n --label_smoothing 0.1 \\n --dropout 0.1 \\n --attention_dropout 0.1 \\n --weight_decay 0.001 \\n --adam_epsilon 1e-08 \\n --max_grad_norm 0.1 \\n --lr_scheduler polynomial \\n --learning_rate 3e-04 \\n --num_train_epochs 1 \\n --warmup_steps 4 \\n --gradient_accumulation_steps 1 \\n --distributed-port 8787 \\n --use_dummy_dataset 1 \\n --distributed_retriever 
{distributed_retriever} \\n ".split() if gpus > 0: testargs.append(f"--gpus={gpus}" ) if is_apex_available(): testargs.append("--fp16" ) else: testargs.append("--gpus=0" ) testargs.append("--distributed_backend=ddp_cpu" ) testargs.append("--num_processes=2" ) __lowerCAmelCase = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(lowercase_ , env=self.get_env() ) __lowerCAmelCase = os.path.join(lowercase_ , "metrics.json" ) with open(lowercase_ ) as f: __lowerCAmelCase = json.load(lowercase_ ) return result @require_torch_gpu def snake_case ( self ): __lowerCAmelCase = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu def snake_case ( self ): __lowerCAmelCase = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_gpu @require_ray def snake_case ( self ): __lowerCAmelCase = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu @require_ray def snake_case ( self ): __lowerCAmelCase = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
636
from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class lowercase_ : _lowerCamelCase = LEDConfig _lowerCamelCase = {} _lowerCamelCase = 'gelu' def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=20 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=4 , ): _snake_case : Optional[int] = parent _snake_case : str = batch_size _snake_case : int = seq_length _snake_case : Dict = is_training _snake_case : Optional[Any] = use_labels _snake_case : Tuple = vocab_size _snake_case : str = hidden_size _snake_case : int = num_hidden_layers _snake_case : Union[str, Any] = num_attention_heads _snake_case : int = intermediate_size _snake_case : List[str] = hidden_dropout_prob _snake_case : List[Any] = attention_probs_dropout_prob _snake_case : int = max_position_embeddings _snake_case : Union[str, Any] = eos_token_id _snake_case : str = pad_token_id _snake_case : Any = bos_token_id _snake_case : str = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after _snake_case : List[Any] = self.attention_window + 2 # because of padding `encoder_seq_length`, is different 
from `seq_length`. Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests _snake_case : List[str] = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def UpperCamelCase ( self ): _snake_case : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _snake_case : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 ) _snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : List[str] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) _snake_case : Optional[Any] = prepare_led_inputs_dict(lowercase_ , lowercase_ , lowercase_ ) _snake_case : int = tf.concat( [tf.zeros_like(lowercase_ )[:, :-1], tf.ones_like(lowercase_ )[:, -1:]] , axis=-1 , ) _snake_case : List[Any] = global_attention_mask return config, inputs_dict def UpperCamelCase ( self , lowercase_ , lowercase_ ): _snake_case : Dict = TFLEDModel(config=lowercase_ ).get_decoder() _snake_case : Optional[Any] = inputs_dict["input_ids"] _snake_case : Optional[int] = input_ids[:1, :] _snake_case : int = inputs_dict["attention_mask"][:1, :] _snake_case : int = 1 # first forward pass _snake_case : str = model(lowercase_ , attention_mask=lowercase_ , 
use_cache=lowercase_ ) _snake_case ,_snake_case : Optional[int] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) _snake_case : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) _snake_case : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _snake_case : str = model(lowercase_ , attention_mask=lowercase_ )[0] _snake_case : List[str] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _snake_case : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx] _snake_case : List[str] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 ) def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> List[Any]: '''simple docstring''' if attention_mask is None: _snake_case : int = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _snake_case : Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _snake_case : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _snake_case : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, 
"head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class lowercase_ ( __snake_case , __snake_case , unittest.TestCase ): _lowerCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () _lowerCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else () _lowerCamelCase = ( { 'conversational': TFLEDForConditionalGeneration, 'feature-extraction': TFLEDModel, 'summarization': TFLEDForConditionalGeneration, 'text2text-generation': TFLEDForConditionalGeneration, 'translation': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) _lowerCamelCase = True _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def UpperCamelCase ( self ): _snake_case : Optional[Any] = TFLEDModelTester(self ) _snake_case : List[Any] = ConfigTester(self , config_class=lowercase_ ) def UpperCamelCase ( self ): self.config_tester.run_common_tests() def UpperCamelCase ( self ): _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ ) def UpperCamelCase ( self ): _snake_case ,_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Tuple = tf.zeros_like(inputs_dict["attention_mask"] ) _snake_case : Tuple = 2 _snake_case : Dict = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , ) _snake_case : Tuple = True _snake_case : Union[str, Any] = self.model_tester.seq_length _snake_case : Union[str, Any] = self.model_tester.encoder_seq_length def check_decoder_attentions_output(lowercase_ ): _snake_case : Optional[Any] = outputs.decoder_attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(lowercase_ ): 
_snake_case : int = [t.numpy() for t in outputs.encoder_attentions] _snake_case : Optional[int] = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: _snake_case : Union[str, Any] = True _snake_case : Dict = False _snake_case : Any = False _snake_case : Any = model_class(lowercase_ ) _snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) ) _snake_case : Tuple = len(lowercase_ ) self.assertEqual(config.output_hidden_states , lowercase_ ) check_encoder_attentions_output(lowercase_ ) if self.is_encoder_decoder: _snake_case : int = model_class(lowercase_ ) _snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) ) self.assertEqual(config.output_hidden_states , lowercase_ ) check_decoder_attentions_output(lowercase_ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _snake_case : List[Any] = True _snake_case : Any = model_class(lowercase_ ) _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) ) self.assertEqual(config.output_hidden_states , lowercase_ ) check_encoder_attentions_output(lowercase_ ) # Check attention is always last and order is fine _snake_case : Optional[int] = True _snake_case : Optional[int] = True _snake_case : List[Any] = model_class(lowercase_ ) _snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase_ ) ) 
self.assertEqual(model.config.output_hidden_states , lowercase_ ) check_encoder_attentions_output(lowercase_ ) @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." ) def UpperCamelCase ( self ): pass def UpperCamelCase ( self ): # TODO: Head-masking not yet implement pass def snake_case (__lowercase ) -> Optional[Any]: '''simple docstring''' return tf.constant(__lowercase , dtype=tf.intaa ) __SCREAMING_SNAKE_CASE : List[Any] = 1E-4 @slow @require_tf class lowercase_ ( unittest.TestCase ): def UpperCamelCase ( self ): _snake_case : Dict = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led # change to intended input here _snake_case : Union[str, Any] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Optional[int] = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Union[str, Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ ) _snake_case : Optional[Any] = model(**lowercase_ )[0] _snake_case : str = (1, 1_024, 768) self.assertEqual(output.shape , lowercase_ ) # change to expected output here _snake_case : Optional[Any] = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 ) def UpperCamelCase ( self ): _snake_case : List[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ) # change to intended input here _snake_case : int = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : int = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Optional[Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ ) _snake_case : Tuple = model(**lowercase_ )[0] _snake_case : Any = (1, 1_024, model.config.vocab_size) self.assertEqual(output.shape , lowercase_ ) # change to expected output here 
_snake_case : Optional[int] = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 , rtol=1e-3 )
670
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Lazily-exposed public names; the tokenizer requires the optional
# sentencepiece dependency, so it is only registered when available.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_swa import GPTSwaTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the heavy tokenizer import
    # happens on first attribute access.
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
462
import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __SCREAMING_SNAKE_CASE : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class lowercase_ ( __snake_case , unittest.TestCase ): _lowerCamelCase = ReformerTokenizer _lowerCamelCase = ReformerTokenizerFast _lowerCamelCase = True _lowerCamelCase = False _lowerCamelCase = True def UpperCamelCase ( self ): super().setUp() _snake_case : Union[str, Any] = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase ( self ): _snake_case : int = "<s>" _snake_case : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ ) def UpperCamelCase ( self ): _snake_case : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(lowercase_ ) , 1_000 ) def UpperCamelCase ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_000 ) def UpperCamelCase ( self ): if not self.test_rust_tokenizer: return _snake_case : Tuple = self.get_tokenizer() _snake_case : List[str] = self.get_rust_tokenizer() _snake_case : int = "I was born in 92000, and this is falsé." 
_snake_case : Tuple = tokenizer.tokenize(lowercase_ ) _snake_case : List[Any] = rust_tokenizer.tokenize(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) _snake_case : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) _snake_case : Tuple = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) _snake_case : Dict = self.get_rust_tokenizer() _snake_case : List[Any] = tokenizer.encode(lowercase_ ) _snake_case : str = rust_tokenizer.encode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) def UpperCamelCase ( self , lowercase_=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) # Simple input _snake_case : List[str] = "This is a simple input" _snake_case : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"] _snake_case : Union[str, Any] = ("This is a simple input", "This is a pair") _snake_case : int = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" ) # Simple input self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" ) # Simple input self.assertRaises( lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , ) # Pair input self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" ) # Pair input self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" ) # Pair input self.assertRaises( lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , 
max_length=lowercase_ , padding="max_length" , ) def UpperCamelCase ( self ): pass def UpperCamelCase ( self ): _snake_case : Dict = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ ) _snake_case : Tuple = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] , ) _snake_case : str = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) _snake_case : Any = tokenizer.convert_tokens_to_ids(lowercase_ ) self.assertListEqual( lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) _snake_case : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_ ) self.assertListEqual( lowercase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def UpperCamelCase ( self ): return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" ) @slow def UpperCamelCase ( self ): _snake_case : int = "Hello World!" _snake_case : Dict = [126, 32, 262, 152, 38, 72, 287] self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) ) @slow def UpperCamelCase ( self ): _snake_case : Optional[int] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) _snake_case : Dict = [ 108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265, ] self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) ) @require_torch @slow def UpperCamelCase ( self ): import torch from transformers import ReformerConfig, ReformerModel # Build sequence _snake_case : str = list(self.big_tokenizer.get_vocab().keys() )[:10] _snake_case : str = " ".join(lowercase_ ) _snake_case : Tuple = self.big_tokenizer.encode_plus(lowercase_ , return_tensors="pt" ) _snake_case : Tuple = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" ) _snake_case : int = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) _snake_case : Union[str, Any] = encoded_sequence["input_ids"].shape _snake_case : List[str] = ReformerModel(lowercase_ ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowercase_ ) model(**lowercase_ ) @slow def UpperCamelCase ( self ): # fmt: off _snake_case : Union[str, Any] = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 _snake_case : Tuple = [ "This is a very simple sentence.", "The quick brown fox jumps over the lazy dog.", ] self.tokenizer_integration_test_util( expected_encoding=lowercase_ , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowercase_ , sequences=lowercase_ , )
670
0
"""simple docstring""" def lowercase__() ->List[str]: """simple docstring""" lowercase__ : Dict= 0 for i in range(1 , 1_001 ): total += i**i return str(__lowercase )[-10:] if __name__ == "__main__": print(solution())
218
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class lowercase_ ( unittest.TestCase ): def UpperCamelCase ( self ): _snake_case : Any = tempfile.mkdtemp() # fmt: off _snake_case : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on _snake_case : Dict = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) _snake_case : Dict = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] _snake_case : Optional[int] = {"unk_token": "<unk>"} _snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) _snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowercase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowercase_ ) ) _snake_case : Any = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], } _snake_case : Optional[Any] = os.path.join(self.tmpdirname , lowercase_ ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(lowercase_ , lowercase_ ) def UpperCamelCase ( self , **lowercase_ ): return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ ) def UpperCamelCase ( self , **lowercase_ ): return 
CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ ) def UpperCamelCase ( self , **lowercase_ ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ ) def UpperCamelCase ( self ): shutil.rmtree(self.tmpdirname ) def UpperCamelCase ( self ): _snake_case : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _snake_case : Union[str, Any] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase ( self ): _snake_case : Tuple = self.get_tokenizer() _snake_case : Any = self.get_rust_tokenizer() _snake_case : Optional[Any] = self.get_image_processor() _snake_case : Any = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) processor_slow.save_pretrained(self.tmpdirname ) _snake_case : Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ ) _snake_case : List[Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) processor_fast.save_pretrained(self.tmpdirname ) _snake_case : Optional[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , lowercase_ ) self.assertIsInstance(processor_fast.tokenizer , lowercase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , lowercase_ ) self.assertIsInstance(processor_fast.image_processor , lowercase_ ) def UpperCamelCase ( self ): _snake_case : List[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) 
processor.save_pretrained(self.tmpdirname ) _snake_case : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) _snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 ) _snake_case : Tuple = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase_ ) def UpperCamelCase ( self ): _snake_case : Union[str, Any] = self.get_image_processor() _snake_case : Any = self.get_tokenizer() _snake_case : int = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) _snake_case : Optional[int] = self.prepare_image_inputs() _snake_case : Optional[Any] = image_processor(lowercase_ , return_tensors="np" ) _snake_case : str = processor(images=lowercase_ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCamelCase ( self ): _snake_case : Optional[Any] = self.get_image_processor() _snake_case : Any = self.get_tokenizer() _snake_case : Dict = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) _snake_case : List[str] = "lower newer" _snake_case : int = processor(text=lowercase_ ) _snake_case : str = tokenizer(lowercase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase ( self ): _snake_case : List[Any] = self.get_image_processor() _snake_case : int = self.get_tokenizer() _snake_case : Tuple = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) _snake_case : List[Any] = "lower newer" _snake_case : int = 
self.prepare_image_inputs() _snake_case : Dict = processor(text=lowercase_ , images=lowercase_ ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(lowercase_ ): processor() def UpperCamelCase ( self ): _snake_case : Dict = self.get_image_processor() _snake_case : List[str] = self.get_tokenizer() _snake_case : Union[str, Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) _snake_case : Optional[int] = self.prepare_image_inputs() _snake_case : Dict = self.prepare_image_inputs() _snake_case : List[Any] = processor(images=lowercase_ , visual_prompt=lowercase_ ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] ) # test if it raises when no input is passed with pytest.raises(lowercase_ ): processor() def UpperCamelCase ( self ): _snake_case : Dict = self.get_image_processor() _snake_case : List[Any] = self.get_tokenizer() _snake_case : str = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) _snake_case : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _snake_case : Any = processor.batch_decode(lowercase_ ) _snake_case : Any = tokenizer.batch_decode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ )
670
0
'''simple docstring''' import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class lowercase_ ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase_ ( self : Any ): """simple docstring""" _SCREAMING_SNAKE_CASE = inspect.getfile(accelerate.test_utils ) _SCREAMING_SNAKE_CASE = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] ) _SCREAMING_SNAKE_CASE = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] ) _SCREAMING_SNAKE_CASE = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] ) @require_multi_gpu def lowerCAmelCase_ ( self : List[Any] ): """simple docstring""" print(F"""Found {torch.cuda.device_count()} devices.""" ) _SCREAMING_SNAKE_CASE = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase_ , env=os.environ.copy() ) @require_multi_gpu def lowerCAmelCase_ ( self : Optional[Any] ): """simple docstring""" print(F"""Found {torch.cuda.device_count()} devices.""" ) _SCREAMING_SNAKE_CASE = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path] print(F"""Command: {cmd}""" ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase_ , env=os.environ.copy() ) @require_multi_gpu def lowerCAmelCase_ ( self : str ): """simple docstring""" _SCREAMING_SNAKE_CASE = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowercase_ , env=os.environ.copy() ) @require_multi_gpu def lowerCAmelCase_ ( self : int ): """simple docstring""" print(F"""Found {torch.cuda.device_count()} devices, using 2 devices 
only""" ) _SCREAMING_SNAKE_CASE = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ): execute_subprocess_async(lowercase_ , env=os.environ.copy() ) if __name__ == "__main__": lowerCamelCase_ = Accelerator() lowerCamelCase_ = (accelerator.state.process_index + 2, 10) lowerCamelCase_ = torch.randint(0, 10, shape).to(accelerator.device) lowerCamelCase_ = '' lowerCamelCase_ = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." lowerCamelCase_ = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." lowerCamelCase_ = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
418
from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand __SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # pylint: disable=invalid-name def snake_case (__lowercase ) -> Any: '''simple docstring''' if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(__lowercase ): return ext raise Exception( F"""Unable to determine file format from file extension {path}. """ F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" ) def snake_case (__lowercase ) -> Any: '''simple docstring''' _snake_case : int = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) _snake_case : List[Any] = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format _snake_case : Optional[int] = PipelineDataFormat.from_str( format=__lowercase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(__lowercase , __lowercase ) class lowercase_ ( __snake_case ): def __init__( self , lowercase_ , lowercase_ ): _snake_case : str = nlp _snake_case : str = reader @staticmethod def UpperCamelCase ( lowercase_ ): _snake_case : Dict = parser.add_parser("run" , help="Run a pipeline through the CLI" ) run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" ) run_parser.add_argument("--input" , type=lowercase_ , help="Path to the file to use for inference" ) run_parser.add_argument("--output" , type=lowercase_ , help="Path to the file that will be used post to write results." ) run_parser.add_argument("--model" , type=lowercase_ , help="Name or path to the model to instantiate." 
) run_parser.add_argument("--config" , type=lowercase_ , help="Name or path to the model's config to instantiate." ) run_parser.add_argument( "--tokenizer" , type=lowercase_ , help="Name of the tokenizer to use. (default: same as the model name)" ) run_parser.add_argument( "--column" , type=lowercase_ , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , ) run_parser.add_argument( "--format" , type=lowercase_ , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , ) run_parser.add_argument( "--device" , type=lowercase_ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , ) run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." ) run_parser.set_defaults(func=lowercase_ ) def UpperCamelCase ( self ): _snake_case ,_snake_case : Tuple = self._nlp, [] for entry in self._reader: _snake_case : Optional[Any] = nlp(**lowercase_ ) if self._reader.is_multi_columns else nlp(lowercase_ ) if isinstance(lowercase_ , lowercase_ ): outputs.append(lowercase_ ) else: outputs += output # Saving data if self._nlp.binary_output: _snake_case : str = self._reader.save_binary(lowercase_ ) logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" ) else: self._reader.save(lowercase_ )
670
0
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class lowercase : """simple docstring""" def __init__( self : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any]=2 , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : int=False , __UpperCAmelCase : Optional[Any]=10 , __UpperCAmelCase : List[str]=3 , __UpperCAmelCase : List[str]=32 * 4 , __UpperCAmelCase : List[str]=32 * 6 , __UpperCAmelCase : Union[str, Any]=4 , __UpperCAmelCase : int=32 , ) -> List[str]: UpperCAmelCase_= parent UpperCAmelCase_= batch_size UpperCAmelCase_= is_training UpperCAmelCase_= use_auxiliary_loss UpperCAmelCase_= num_queries UpperCAmelCase_= num_channels UpperCAmelCase_= min_size UpperCAmelCase_= max_size UpperCAmelCase_= num_labels UpperCAmelCase_= mask_feature_size def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: UpperCAmelCase_= floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( lowercase_ ) UpperCAmelCase_= torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowercase_ ) UpperCAmelCase_= ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowercase_ ) > 0.5 ).float() UpperCAmelCase_= (torch.rand((self.batch_size, self.num_labels) , device=lowercase_ ) > 
0.5).long() UpperCAmelCase_= self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: UpperCAmelCase_= self.prepare_config_and_inputs() UpperCAmelCase_= {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : int , __UpperCAmelCase : Any ) -> Any: UpperCAmelCase_= output.encoder_hidden_states UpperCAmelCase_= output.pixel_decoder_hidden_states UpperCAmelCase_= output.transformer_decoder_hidden_states self.parent.assertTrue(len(lowercase_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowercase_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(lowercase_ ) , config.decoder_config.decoder_layers ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any]=False ) -> List[str]: with torch.no_grad(): UpperCAmelCase_= MaskFormerModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() UpperCAmelCase_= model(pixel_values=lowercase_ , pixel_mask=lowercase_ ) UpperCAmelCase_= model(lowercase_ , output_hidden_states=lowercase_ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, 
self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(lowercase_ , lowercase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int ) -> Optional[int]: UpperCAmelCase_= MaskFormerForInstanceSegmentation(config=lowercase_ ) model.to(lowercase_ ) model.eval() def comm_check_on_output(__UpperCAmelCase : int ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): UpperCAmelCase_= model(pixel_values=lowercase_ , pixel_mask=lowercase_ ) UpperCAmelCase_= model(lowercase_ ) comm_check_on_output(lowercase_ ) UpperCAmelCase_= model( pixel_values=lowercase_ , pixel_mask=lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ ) comm_check_on_output(lowercase_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class lowercase ( __snake_case , __snake_case , unittest.TestCase): """simple docstring""" a__ : str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () a__ 
: List[Any] = ( {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) a__ : Dict = False a__ : Optional[int] = False a__ : Any = False a__ : str = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: UpperCAmelCase_= MaskFormerModelTester(self ) UpperCAmelCase_= ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : int ) -> str: UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: UpperCAmelCase_= self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowercase_ ) @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: pass @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: pass @unittest.skip(reason="""MaskFormer is not a generative model""" ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: pass @unittest.skip(reason="""MaskFormer does not use token embeddings""" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: pass @require_torch_multi_gpu @unittest.skip( reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" ) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: pass def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: UpperCAmelCase_= 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_= model_class(lowercase_ ) UpperCAmelCase_= inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_= [*signature.parameters.keys()] UpperCAmelCase_= ["pixel_values"] self.assertListEqual(arg_names[:1] , lowercase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: for model_name in ["facebook/maskformer-swin-small-coco"]: UpperCAmelCase_= MaskFormerModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: UpperCAmelCase_= (self.model_tester.min_size,) * 2 UpperCAmelCase_= { "pixel_values": torch.randn((2, 3, *size) , device=lowercase_ ), "mask_labels": torch.randn((2, 10, *size) , device=lowercase_ ), "class_labels": torch.zeros(2 , 10 , device=lowercase_ ).long(), } UpperCAmelCase_= MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowercase_ ) UpperCAmelCase_= model(**lowercase_ ) self.assertTrue(outputs.loss is not None ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(lowercase_ , **lowercase_ , output_hidden_states=lowercase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_= model_class(lowercase_ ).to(lowercase_ ) UpperCAmelCase_= model(**lowercase_ , output_attentions=lowercase_ ) self.assertTrue(outputs.attentions is not None ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss UpperCAmelCase_= self.all_model_classes[1] UpperCAmelCase_= self.model_tester.prepare_config_and_inputs() UpperCAmelCase_= 
model_class(lowercase_ ) model.to(lowercase_ ) model.train() UpperCAmelCase_= model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ ).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: # only MaskFormerForInstanceSegmentation has the loss UpperCAmelCase_= self.all_model_classes[1] UpperCAmelCase_= self.model_tester.prepare_config_and_inputs() UpperCAmelCase_= True UpperCAmelCase_= True UpperCAmelCase_= model_class(lowercase_ ) model.to(lowercase_ ) model.train() UpperCAmelCase_= model(lowercase_ , mask_labels=lowercase_ , class_labels=lowercase_ ) UpperCAmelCase_= outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() UpperCAmelCase_= outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't UpperCAmelCase_= outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() UpperCAmelCase_= outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=lowercase_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __A = 1E-4 def __a ( ) -> List[Any]: '''simple docstring''' UpperCAmelCase_= Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowercase ( unittest.TestCase): """simple docstring""" @cached_property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: return ( MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" ) if is_vision_available() else None ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: UpperCAmelCase_= MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(lowercase_ ) UpperCAmelCase_= self.default_image_processor UpperCAmelCase_= 
prepare_img() UpperCAmelCase_= image_processor(lowercase_ , return_tensors="""pt""" ).to(lowercase_ ) UpperCAmelCase_= inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowercase_ , (1, 3, 800, 1_088) ) with torch.no_grad(): UpperCAmelCase_= model(**lowercase_ ) UpperCAmelCase_= torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(lowercase_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) ) UpperCAmelCase_= torch.tensor( [[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(lowercase_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) ) UpperCAmelCase_= torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(lowercase_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowercase_ , atol=lowercase_ ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_= ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(lowercase_ ) .eval() ) UpperCAmelCase_= self.default_image_processor UpperCAmelCase_= prepare_img() UpperCAmelCase_= image_processor(lowercase_ , return_tensors="""pt""" ).to(lowercase_ ) UpperCAmelCase_= inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowercase_ , (1, 3, 800, 1_088) ) with torch.no_grad(): UpperCAmelCase_= model(**lowercase_ ) # masks_queries_logits UpperCAmelCase_= outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, 
inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) UpperCAmelCase_= [ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, -1.9_269_832, -2.093_942], ] UpperCAmelCase_= torch.tensor(lowercase_ ).to(lowercase_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) ) # class_queries_logits UpperCAmelCase_= outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) UpperCAmelCase_= torch.tensor( [ [1.6_512E00, -5.2_572E00, -3.3_519E00], [3.6_169E-02, -5.9_025E00, -2.9_313E00], [1.0_766E-04, -7.7_630E00, -5.1_263E00], ] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_ ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: UpperCAmelCase_= ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" ) .to(lowercase_ ) .eval() ) UpperCAmelCase_= self.default_image_processor UpperCAmelCase_= prepare_img() UpperCAmelCase_= image_processor(lowercase_ , return_tensors="""pt""" ).to(lowercase_ ) UpperCAmelCase_= inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(lowercase_ , (1, 3, 800, 1_088) ) with torch.no_grad(): UpperCAmelCase_= model(**lowercase_ ) # masks_queries_logits UpperCAmelCase_= outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) UpperCAmelCase_= [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] UpperCAmelCase_= torch.tensor(lowercase_ ).to(lowercase_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowercase_ , atol=lowercase_ ) ) # class_queries_logits 
UpperCAmelCase_= outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) UpperCAmelCase_= torch.tensor( [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=lowercase_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: UpperCAmelCase_= ( MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" ) .to(lowercase_ ) .eval() ) UpperCAmelCase_= self.default_image_processor UpperCAmelCase_= image_processor( [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , ) UpperCAmelCase_= inputs["pixel_values"].to(lowercase_ ) UpperCAmelCase_= [el.to(lowercase_ ) for el in inputs["mask_labels"]] UpperCAmelCase_= [el.to(lowercase_ ) for el in inputs["class_labels"]] with torch.no_grad(): UpperCAmelCase_= model(**lowercase_ ) self.assertTrue(outputs.loss is not None )
593
# NOTE(review): a diffusers `MultiControlNetModel`-style wrapper (forward over
# `nn.ModuleList`, sequential `save_pretrained` / `from_pretrained` of numbered
# controlnet subfolders), machine-mangled onto two physical lines with every
# parameter renamed to the duplicate `lowercase_` (a SyntaxError) and locals to
# `_snake_case`.  Preserved verbatim; restore from the upstream file before use.
import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) class lowercase_ ( __snake_case ): def __init__( self , lowercase_ ): super().__init__() _snake_case : List[str] = nn.ModuleList(lowercase_ ) def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = True , ): for i, (image, scale, controlnet) in enumerate(zip(lowercase_ , lowercase_ , self.nets ) ): _snake_case ,_snake_case : Optional[int] = controlnet( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) # merge samples if i == 0: _snake_case ,_snake_case : Tuple = down_samples, mid_sample else: _snake_case : Tuple = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(lowercase_ , lowercase_ ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def UpperCamelCase ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , ): _snake_case : Tuple = 0 _snake_case : Dict = save_directory for controlnet in self.nets: controlnet.save_pretrained( lowercase_ , is_main_process=lowercase_ , save_function=lowercase_ , safe_serialization=lowercase_ , variant=lowercase_ , ) idx += 1 _snake_case : int = model_path_to_save + f"""_{idx}""" @classmethod def UpperCamelCase ( cls , lowercase_ , **lowercase_ ): _snake_case : List[str] = 0 _snake_case : Optional[Any] = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant
with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... _snake_case : Optional[Any] = pretrained_model_path while os.path.isdir(lowercase_ ): _snake_case : int = ControlNetModel.from_pretrained(lowercase_ , **lowercase_ ) controlnets.append(lowercase_ ) idx += 1 _snake_case : str = pretrained_model_path + f"""_{idx}""" logger.info(f"""{len(lowercase_ )} controlnets loaded from {pretrained_model_path}.""" ) if len(lowercase_ ) == 0: raise ValueError( f"""No ControlNets found under {os.path.dirname(lowercase_ )}. Expected at least {pretrained_model_path + '_0'}.""" ) return cls(lowercase_ )
670
0
from collections import defaultdict def UpperCAmelCase ( UpperCAmelCase )-> int: '''simple docstring''' SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = True for v in tree[start]: if v not in visited: ret += dfs(__lowercase ) if ret % 2 == 0: cuts.append(__lowercase ) return ret def UpperCAmelCase ( )-> Union[str, Any]: '''simple docstring''' dfs(1 ) if __name__ == "__main__": A_ = 1_0, 9 A_ = defaultdict(list) A_ = {} A_ = [] A_ = 0 A_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (1_0, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
393
# NOTE(review): a transformers processor class (CLIP image processor +
# XLM-Roberta tokenizer), machine-mangled: statements collapsed onto two
# physical lines, every parameter renamed to the duplicate `lowercase_`
# (a SyntaxError) and locals to `_snake_case`, so names the bodies read
# (`kwargs`, `image_processor`, `tokenizer`, `text`, `images`, `encoding`)
# are no longer bound.  Preserved verbatim; restore from upstream before use.
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowercase_ ( __snake_case ): _lowerCamelCase = ['image_processor', 'tokenizer'] _lowerCamelCase = 'CLIPImageProcessor' _lowerCamelCase = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast') def __init__( self , lowercase_=None , lowercase_=None , **lowercase_ ): _snake_case : Optional[Any] = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , lowercase_ , ) _snake_case : Dict = kwargs.pop("feature_extractor" ) _snake_case : Any = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) super().__init__(lowercase_ , lowercase_ ) def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ): if text is None and images is None: raise ValueError("You have to specify either text or images. Both cannot be none."
) if text is not None: _snake_case : str = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ ) if images is not None: _snake_case : List[str] = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ ) if text is not None and images is not None: _snake_case : Tuple = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ ) def UpperCamelCase ( self , *lowercase_ , **lowercase_ ): return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ ) def UpperCamelCase ( self , *lowercase_ , **lowercase_ ): return self.tokenizer.decode(*lowercase_ , **lowercase_ ) @property def UpperCamelCase ( self ): _snake_case : Any = self.tokenizer.model_input_names _snake_case : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
670
0
# NOTE(review): diffusers UNet up/down/mid-block unit tests, machine-mangled:
# all test classes were renamed to the same `UpperCAmelCase` (each shadows the
# previous one), their base mixin to the undefined `__snake_case`, and the
# expected-slice assignments to `__UpperCAmelCase`, so the file neither parses
# per-line nor runs.  Preserved verbatim; restore from the upstream
# diffusers test file before making functional changes.
'''simple docstring''' import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : Optional[int] = DownBlockaD # noqa F405 _A : Optional[Any] = """down""" def __lowerCamelCase ( self ): __UpperCAmelCase = [-0.0_2_3_2, -0.9_8_6_9, 0.8_0_5_4, -0.0_6_3_7, -0.1_6_8_8, -1.4_2_6_4, 0.4_4_7_0, -1.3_3_9_4, 0.0_9_0_4] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : int = ResnetDownsampleBlockaD # noqa F405 _A : int = """down""" def __lowerCamelCase ( self ): __UpperCAmelCase = [0.0_7_1_0, 0.2_4_1_0, -0.7_3_2_0, -1.0_7_5_7, -1.1_3_4_3, 0.3_5_4_0, -0.0_1_3_3, -0.2_5_7_6, 0.0_9_4_8] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : List[Any] = AttnDownBlockaD # noqa F405 _A : str = """down""" def __lowerCamelCase ( self ): __UpperCAmelCase = [0.0_6_3_6, 0.8_9_6_4, -0.6_2_3_4, -1.0_1_3_1, 0.0_8_4_4, 0.4_9_3_5, 0.3_4_3_7, 0.0_9_1_1, -0.2_9_5_7] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : Optional[int] = CrossAttnDownBlockaD # noqa F405 _A : int = """down""" def __lowerCamelCase ( self ): __UpperCAmelCase = super().prepare_init_args_and_inputs_for_common() __UpperCAmelCase = 32 return init_dict, inputs_dict def __lowerCamelCase ( self ): __UpperCAmelCase = [0.2_2_3_8, -0.7_3_9_6, -0.2_2_5_5, -0.3_8_2_9, 0.1_9_2_5, 1.1_6_6_5, 0.0_6_0_3, -0.7_2_9_5, 0.1_9_8_3] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : Optional[int] = SimpleCrossAttnDownBlockaD # noqa F405 _A : List[str] = """down""" @property def __lowerCamelCase ( self ): return super().get_dummy_input(include_encoder_hidden_states=lowercase_ ) def __lowerCamelCase ( self ): __UpperCAmelCase = super().prepare_init_args_and_inputs_for_common()
__UpperCAmelCase = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' ) def __lowerCamelCase ( self ): __UpperCAmelCase = [0.7_9_2_1, -0.0_9_9_2, -0.1_9_6_2, -0.7_6_9_5, -0.4_2_4_2, 0.7_8_0_4, 0.4_7_3_7, 0.2_7_6_5, 0.3_3_3_8] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : List[str] = SkipDownBlockaD # noqa F405 _A : Tuple = """down""" @property def __lowerCamelCase ( self ): return super().get_dummy_input(include_skip_sample=lowercase_ ) def __lowerCamelCase ( self ): __UpperCAmelCase = [-0.0_8_4_5, -0.2_0_8_7, -0.2_4_6_5, 0.0_9_7_1, 0.1_9_0_0, -0.0_4_8_4, 0.2_6_6_4, 0.4_1_7_9, 0.5_0_6_9] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : str = AttnSkipDownBlockaD # noqa F405 _A : List[str] = """down""" @property def __lowerCamelCase ( self ): return super().get_dummy_input(include_skip_sample=lowercase_ ) def __lowerCamelCase ( self ): __UpperCAmelCase = [0.5_5_3_9, 0.1_6_0_9, 0.4_9_2_4, 0.0_5_3_7, -0.1_9_9_5, 0.4_0_5_0, 0.0_9_7_9, -0.2_7_2_1, -0.0_6_4_2] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : Tuple = DownEncoderBlockaD # noqa F405 _A : Optional[Any] = """down""" @property def __lowerCamelCase ( self ): return super().get_dummy_input(include_temb=lowercase_ ) def __lowerCamelCase ( self ): __UpperCAmelCase = { "in_channels": 32, "out_channels": 32, } __UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def __lowerCamelCase ( self ): __UpperCAmelCase = [1.1_1_0_2, 0.5_3_0_2, 0.4_8_7_2, -0.0_0_2_3, -0.8_0_4_2, 0.0_4_8_3, -0.3_4_8_9, -0.5_6_3_2, 0.7_6_2_6] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : Union[str, Any] = AttnDownEncoderBlockaD # noqa F405 _A : Any = """down""" @property def __lowerCamelCase ( self ): return super().get_dummy_input(include_temb=lowercase_ ) def __lowerCamelCase ( self
): __UpperCAmelCase = { "in_channels": 32, "out_channels": 32, } __UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def __lowerCamelCase ( self ): __UpperCAmelCase = [0.8_9_6_6, -0.1_4_8_6, 0.8_5_6_8, 0.8_1_4_1, -0.9_0_4_6, -0.1_3_4_2, -0.0_9_7_2, -0.7_4_1_7, 0.1_5_3_8] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : str = UNetMidBlockaD # noqa F405 _A : Any = """mid""" def __lowerCamelCase ( self ): __UpperCAmelCase = { "in_channels": 32, "temb_channels": 128, } __UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def __lowerCamelCase ( self ): __UpperCAmelCase = [-0.1_0_6_2, 1.7_2_4_8, 0.3_4_9_4, 1.4_5_6_9, -0.0_9_1_0, -1.2_4_2_1, -0.9_9_8_4, 0.6_7_3_6, 1.0_0_2_8] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : Dict = UNetMidBlockaDCrossAttn # noqa F405 _A : int = """mid""" def __lowerCamelCase ( self ): __UpperCAmelCase = super().prepare_init_args_and_inputs_for_common() __UpperCAmelCase = 32 return init_dict, inputs_dict def __lowerCamelCase ( self ): __UpperCAmelCase = [0.0_1_8_7, 2.4_2_2_0, 0.4_4_8_4, 1.1_2_0_3, -0.6_1_2_1, -1.5_1_2_2, -0.8_2_7_0, 0.7_8_5_1, 1.8_3_3_5] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405 _A : List[Any] = """mid""" @property def __lowerCamelCase ( self ): return super().get_dummy_input(include_encoder_hidden_states=lowercase_ ) def __lowerCamelCase ( self ): __UpperCAmelCase = super().prepare_init_args_and_inputs_for_common() __UpperCAmelCase = 32 return init_dict, inputs_dict def __lowerCamelCase ( self ): __UpperCAmelCase = [0.7_1_4_3, 1.9_9_7_4, 0.5_4_4_8, 1.3_9_7_7, 0.1_2_8_2, -1.1_2_3_7, -1.4_2_3_8, 0.5_5_3_0, 0.8_8_8_0] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : str = UpBlockaD # noqa F405 _A : Dict = """up""" @property def __lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ ) def __lowerCamelCase ( self ): __UpperCAmelCase = [-0.2_0_4_1, -0.4_1_6_5, -0.3_0_2_2, 0.0_0_4_1, -0.6_6_2_8, -0.7_0_5_3, 0.1_9_2_8, -0.0_3_2_5, 0.0_5_2_3] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : List[Any] = ResnetUpsampleBlockaD # noqa F405 _A : Optional[Any] = """up""" @property def __lowerCamelCase ( self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ ) def __lowerCamelCase ( self ): __UpperCAmelCase = [0.2_2_8_7, 0.3_5_4_9, -0.1_3_4_6, 0.4_7_9_7, -0.1_7_1_5, -0.9_6_4_9, 0.7_3_0_5, -0.5_8_6_4, -0.6_2_4_4] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : Optional[int] = CrossAttnUpBlockaD # noqa F405 _A : str = """up""" @property def __lowerCamelCase ( self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ ) def __lowerCamelCase ( self ): __UpperCAmelCase = super().prepare_init_args_and_inputs_for_common() __UpperCAmelCase = 32 return init_dict, inputs_dict def __lowerCamelCase ( self ): __UpperCAmelCase = [-0.1_4_0_3, -0.3_5_1_5, -0.0_4_2_0, -0.1_4_2_5, 0.3_1_6_7, 0.5_0_9_4, -0.2_1_8_1, 0.5_9_3_1, 0.5_5_8_2] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : Optional[Any] = SimpleCrossAttnUpBlockaD # noqa F405 _A : List[Any] = """up""" @property def __lowerCamelCase ( self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ , include_encoder_hidden_states=lowercase_ ) def __lowerCamelCase ( self ): __UpperCAmelCase = super().prepare_init_args_and_inputs_for_common() __UpperCAmelCase = 32 return init_dict, inputs_dict def __lowerCamelCase ( self ): __UpperCAmelCase = [0.2_6_4_5, 0.1_4_8_0, 0.0_9_0_9, 0.8_0_4_4, -0.9_7_5_8, -0.9_0_8_3, 0.0_9_9_4, -1.1_4_5_3, -0.7_4_0_2] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ):
_A : Any = AttnUpBlockaD # noqa F405 _A : List[Any] = """up""" @property def __lowerCamelCase ( self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ ) @unittest.skipIf(torch_device == 'mps' , 'MPS result is not consistent' ) def __lowerCamelCase ( self ): __UpperCAmelCase = [0.0_9_7_9, 0.1_3_2_6, 0.0_0_2_1, 0.0_6_5_9, 0.2_2_4_9, 0.0_0_5_9, 0.1_1_3_2, 0.5_9_5_2, 0.1_0_3_3] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : Union[str, Any] = SkipUpBlockaD # noqa F405 _A : Any = """up""" @property def __lowerCamelCase ( self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ ) def __lowerCamelCase ( self ): __UpperCAmelCase = [-0.0_8_9_3, -0.1_2_3_4, -0.1_5_0_6, -0.0_3_3_2, 0.0_1_2_3, -0.0_2_1_1, 0.0_5_6_6, 0.0_1_4_3, 0.0_3_6_2] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : Union[str, Any] = AttnSkipUpBlockaD # noqa F405 _A : Optional[int] = """up""" @property def __lowerCamelCase ( self ): return super().get_dummy_input(include_res_hidden_states_tuple=lowercase_ ) def __lowerCamelCase ( self ): __UpperCAmelCase = [0.0_3_6_1, 0.0_6_1_7, 0.2_7_8_7, -0.0_3_5_0, 0.0_3_4_2, 0.3_4_2_1, -0.0_8_4_3, 0.0_9_1_3, 0.3_0_1_5] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : Tuple = UpDecoderBlockaD # noqa F405 _A : List[str] = """up""" @property def __lowerCamelCase ( self ): return super().get_dummy_input(include_temb=lowercase_ ) def __lowerCamelCase ( self ): __UpperCAmelCase = {"in_channels": 32, "out_channels": 32} __UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def __lowerCamelCase ( self ): __UpperCAmelCase = [0.4_4_0_4, 0.1_9_9_8, -0.9_8_8_6, -0.3_3_2_0, -0.3_1_2_8, -0.7_0_3_4, -0.6_9_5_5, -0.2_3_3_8, -0.3_1_3_7] super().test_output(lowercase_ ) class UpperCAmelCase ( __snake_case , unittest.TestCase ): _A : str = AttnUpDecoderBlockaD # noqa F405 _A :
Optional[int] = """up""" @property def __lowerCamelCase ( self ): return super().get_dummy_input(include_temb=lowercase_ ) def __lowerCamelCase ( self ): __UpperCAmelCase = {"in_channels": 32, "out_channels": 32} __UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def __lowerCamelCase ( self ): __UpperCAmelCase = [0.6_7_3_8, 0.4_4_9_1, 0.1_0_5_5, 1.0_7_1_0, 0.7_3_1_6, 0.3_3_3_9, 0.3_3_5_2, 0.1_0_2_3, 0.3_5_6_8] super().test_output(lowercase_ )
126
from __future__ import annotations def snake_case (__lowercase , __lowercase , __lowercase ) -> dict[str, float]: '''simple docstring''' if (voltage, current, resistance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if resistance < 0: raise ValueError("Resistance cannot be negative" ) if voltage == 0: return {"voltage": float(current * resistance )} elif current == 0: return {"current": voltage / resistance} elif resistance == 0: return {"resistance": voltage / current} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
670
0
"""simple docstring""" def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" if not grid or not grid[0]: raise TypeError("The grid does not contain the appropriate information" ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] __snake_case = grid[0] for row_n in range(1 , len(__lowercase ) ): __snake_case = grid[row_n] __snake_case = fill_row(__lowercase , __lowercase ) __snake_case = grid[row_n] return grid[-1][-1] def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> list: """simple docstring""" current_row[0] += row_above[0] for cell_n in range(1 , len(__lowercase ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
163
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def _empty_device_cache():
    """Free cached memory on whichever accelerator backend is present."""
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()


def release_memory(*objects):
    """Set each passed object to None, run GC and empty the device cache.

    Returns the list of (now None) slots so callers can rebind, e.g.
    ``a, b = release_memory(a, b)``.

    The previous (obfuscated) version used duplicate/undefined names and
    could not run; this restores the working form.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    _empty_device_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Return True if ``exception`` looks like an out-of-memory error."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    """Decorator: retry ``function`` with a halved batch size on OOM.

    ``function``'s first parameter must be the batch size; the decorator
    supplies it, starting at ``starting_batch_size`` and halving after every
    OOM-like failure until the call succeeds or the size reaches zero.

    :raises RuntimeError: when no batch size down to 1 succeeds.
    :raises TypeError: when the caller passes the batch size explicitly.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        _empty_device_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: the decorator injects the batch size itself.
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    _empty_device_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
670
0
import argparse import collections import os import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_table.py _lowerCAmelCase = 'src/transformers' _lowerCAmelCase = 'docs/source/en' _lowerCAmelCase = '.' def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ): '''simple docstring''' with open(__lowercase ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: A_ : List[Any] = f.readlines() # Find the start prompt. A_ : Optional[int] = 0 while not lines[start_index].startswith(__lowercase ): start_index += 1 start_index += 1 A_ : Optional[Any] = start_index while not lines[end_index].startswith(__lowercase ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # Add here suffixes that are used to identify models, separated by | _lowerCAmelCase = 'Model|Encoder|Decoder|ForConditionalGeneration' # Regexes that match TF/Flax/PT model names. _lowerCAmelCase = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") _lowerCAmelCase = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _lowerCAmelCase = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""") # This is to make sure the transformers module imported is the one in the repo. 
# NOTE(review): the remainder of `utils/check_table.py` (camel-case splitter,
# cell centering, model-table builder, `check_model_table` entry point),
# machine-mangled: statements collapsed onto three physical lines, locals
# renamed to `A_`, parameters to duplicated `_lowerCAmelCase` names.  It does
# not parse per-line.  Preserved verbatim; restore from upstream before use.
_lowerCAmelCase = direct_transformers_import(TRANSFORMERS_PATH) def _lowerCAmelCase ( _lowerCAmelCase ): '''simple docstring''' A_ : Optional[Any] = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" ,__lowercase ) return [m.group(0 ) for m in matches] def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ): '''simple docstring''' A_ : Any = 2 if text == "✅" or text == "❌" else len(__lowercase ) A_ : List[str] = (width - text_length) // 2 A_ : Union[str, Any] = width - text_length - left_indent return " " * left_indent + text + " " * right_indent def _lowerCAmelCase ( ): '''simple docstring''' A_ : int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES A_ : Tuple = { name: config_maping_names[code] for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if code in config_maping_names } A_ : Tuple = {name: config.replace("""Config""" ,"""""" ) for name, config in model_name_to_config.items()} # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. A_ : Optional[int] = collections.defaultdict(__lowercase ) A_ : Optional[Any] = collections.defaultdict(__lowercase ) A_ : Union[str, Any] = collections.defaultdict(__lowercase ) A_ : str = collections.defaultdict(__lowercase ) A_ : int = collections.defaultdict(__lowercase ) # Let's lookup through all transformers object (once).
for attr_name in dir(__lowercase ): A_ : str = None if attr_name.endswith("""Tokenizer""" ): A_ : Union[str, Any] = slow_tokenizers A_ : str = attr_name[:-9] elif attr_name.endswith("""TokenizerFast""" ): A_ : Dict = fast_tokenizers A_ : Optional[int] = attr_name[:-1_3] elif _re_tf_models.match(__lowercase ) is not None: A_ : str = tf_models A_ : int = _re_tf_models.match(__lowercase ).groups()[0] elif _re_flax_models.match(__lowercase ) is not None: A_ : List[str] = flax_models A_ : Optional[int] = _re_flax_models.match(__lowercase ).groups()[0] elif _re_pt_models.match(__lowercase ) is not None: A_ : Dict = pt_models A_ : Dict = _re_pt_models.match(__lowercase ).groups()[0] if lookup_dict is not None: while len(__lowercase ) > 0: if attr_name in model_name_to_prefix.values(): A_ : Dict = True break # Try again after removing the last word in the name A_ : int = "".join(camel_case_split(__lowercase )[:-1] ) # Let's build that table! A_ : List[str] = list(model_name_to_config.keys() ) model_names.sort(key=str.lower ) A_ : List[str] = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"] # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
A_ : str = [len(__lowercase ) + 2 for c in columns] A_ : Optional[Any] = max([len(__lowercase ) for name in model_names] ) + 2 # Build the table per se A_ : str = "|" + "|".join([_center_text(__lowercase ,__lowercase ) for c, w in zip(__lowercase ,__lowercase )] ) + "|\n" # Use ":-----:" format to center-aligned table cell texts table += "|" + "|".join([""":""" + """-""" * (w - 2) + """:""" for w in widths] ) + "|\n" A_ : List[Any] = {True: "✅", False: "❌"} for name in model_names: A_ : Optional[Any] = model_name_to_prefix[name] A_ : List[str] = [ name, check[slow_tokenizers[prefix]], check[fast_tokenizers[prefix]], check[pt_models[prefix]], check[tf_models[prefix]], check[flax_models[prefix]], ] table += "|" + "|".join([_center_text(__lowercase ,__lowercase ) for l, w in zip(__lowercase ,__lowercase )] ) + "|\n" return table def _lowerCAmelCase ( _lowerCAmelCase=False ): '''simple docstring''' A_ : Any = _find_text_in_file( filename=os.path.join(__lowercase ,"""index.md""" ) ,start_prompt="""<!--This table is updated automatically from the auto modules""" ,end_prompt="""<!-- End table-->""" ,) A_ : List[str] = get_model_table_from_auto_modules() if current_table != new_table: if overwrite: with open(os.path.join(__lowercase ,"""index.md""" ) ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_table] + lines[end_index:] ) else: raise ValueError( """The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.""" ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") _lowerCAmelCase = parser.parse_args() check_model_table(args.fix_and_overwrite)
569
__SCREAMING_SNAKE_CASE : Union[str, Any] = { 'a': 'AAAAA', 'b': 'AAAAB', 'c': 'AAABA', 'd': 'AAABB', 'e': 'AABAA', 'f': 'AABAB', 'g': 'AABBA', 'h': 'AABBB', 'i': 'ABAAA', 'j': 'BBBAA', 'k': 'ABAAB', 'l': 'ABABA', 'm': 'ABABB', 'n': 'ABBAA', 'o': 'ABBAB', 'p': 'ABBBA', 'q': 'ABBBB', 'r': 'BAAAA', 's': 'BAAAB', 't': 'BAABA', 'u': 'BAABB', 'v': 'BBBAB', 'w': 'BABAA', 'x': 'BABAB', 'y': 'BABBA', 'z': 'BABBB', ' ': ' ', } __SCREAMING_SNAKE_CASE : int = {value: key for key, value in encode_dict.items()} def snake_case (__lowercase ) -> str: '''simple docstring''' _snake_case : Any = "" for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception("encode() accepts only letters of the alphabet and spaces" ) return encoded def snake_case (__lowercase ) -> str: '''simple docstring''' if set(__lowercase ) - {"A", "B", " "} != set(): raise Exception("decode() accepts only 'A', 'B' and spaces" ) _snake_case : str = "" for word in coded.split(): while len(__lowercase ) != 0: decoded += decode_dict[word[:5]] _snake_case : int = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
670
0
import random def lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = False ): """simple docstring""" a_ = {i: [] for i in range(__lowercase )} # if probability is greater or equal than 1, then generate a complete graph if probability >= 1: return complete_graph(__lowercase ) # if probability is lower or equal than 0, then return a graph without edges if probability <= 0: return graph # for each couple of nodes, add an edge from u to v # if the number randomly generated is greater than probability probability for i in range(__lowercase ): for j in range(i + 1 , __lowercase ): if random.random() < probability: graph[i].append(__lowercase ) if not directed: # if the graph is undirected, add an edge in from j to i, either graph[j].append(__lowercase ) return graph def lowerCamelCase_ ( UpperCAmelCase__ ): """simple docstring""" return { i: [j for j in range(__lowercase ) if i != j] for i in range(__lowercase ) } if __name__ == "__main__": import doctest doctest.testmod()
483
# NOTE(review): slow Flax Stable Diffusion 2 pipeline tests, machine-mangled
# onto two physical lines (locals renamed to `_snake_case`, arguments to
# `lowercase_`); they no longer parse or run.  Preserved verbatim; restore
# from the upstream diffusers test file before making functional changes.
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowercase_ ( unittest.TestCase ): def UpperCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def UpperCamelCase ( self ): _snake_case ,_snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , ) _snake_case : List[Any] = "A painting of a squirrel eating a burger" _snake_case : Union[str, Any] = jax.device_count() _snake_case : List[Any] = num_samples * [prompt] _snake_case : Tuple = sd_pipe.prepare_inputs(lowercase_ ) _snake_case : str = replicate(lowercase_ ) _snake_case : Dict = shard(lowercase_ ) _snake_case : List[Any] = jax.random.PRNGKey(0 ) _snake_case : List[Any] = jax.random.split(lowercase_ , jax.device_count() ) _snake_case : Tuple = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) _snake_case : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) _snake_case : str = images[0, 253:256, 253:256, -1] _snake_case : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) ) _snake_case : Optional[Any] = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def UpperCamelCase ( self ): _snake_case : Optional[Any] = "stabilityai/stable-diffusion-2" _snake_case ,_snake_case : List[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase_ , subfolder="scheduler" )
_snake_case ,_snake_case : int = FlaxStableDiffusionPipeline.from_pretrained( lowercase_ , scheduler=lowercase_ , revision="bf16" , dtype=jnp.bfloataa , ) _snake_case : str = scheduler_params _snake_case : Dict = "A painting of a squirrel eating a burger" _snake_case : Dict = jax.device_count() _snake_case : Optional[int] = num_samples * [prompt] _snake_case : List[str] = sd_pipe.prepare_inputs(lowercase_ ) _snake_case : Optional[int] = replicate(lowercase_ ) _snake_case : Union[str, Any] = shard(lowercase_ ) _snake_case : List[Any] = jax.random.PRNGKey(0 ) _snake_case : Union[str, Any] = jax.random.split(lowercase_ , jax.device_count() ) _snake_case : str = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) _snake_case : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) _snake_case : List[str] = images[0, 253:256, 253:256, -1] _snake_case : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) _snake_case : Dict = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
670
0
# NOTE(review): an Open-Llama `PretrainedConfig` subclass, machine-mangled:
# every `__init__` parameter was renamed to the duplicate `_snake_case`
# (a SyntaxError) and attribute assignments to `_UpperCamelCase`.  A
# triple-quoted error message even spans the original line break below.
# Preserved verbatim; restore from the upstream transformers file before use.
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : int = logging.get_logger(__name__) _UpperCAmelCase : int = { 's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json', } class UpperCAmelCase ( __snake_case ): """simple docstring""" A__ : Any = 'open-llama' def __init__( self , _snake_case=100000 , _snake_case=4096 , _snake_case=11008 , _snake_case=32 , _snake_case=32 , _snake_case="silu" , _snake_case=2048 , _snake_case=0.02 , _snake_case=1E-6 , _snake_case=True , _snake_case=0 , _snake_case=1 , _snake_case=2 , _snake_case=False , _snake_case=True , _snake_case=0.1 , _snake_case=0.1 , _snake_case=True , _snake_case=True , _snake_case=None , **_snake_case , ) -> Optional[int]: _UpperCamelCase : Tuple = vocab_size _UpperCamelCase : Optional[Any] = max_position_embeddings _UpperCamelCase : Dict = hidden_size _UpperCamelCase : Tuple = intermediate_size _UpperCamelCase : Optional[Any] = num_hidden_layers _UpperCamelCase : Optional[Any] = num_attention_heads _UpperCamelCase : Tuple = hidden_act _UpperCamelCase : str = initializer_range _UpperCamelCase : int = rms_norm_eps _UpperCamelCase : Dict = use_cache _UpperCamelCase : Optional[Any] = kwargs.pop( '''use_memorry_efficient_attention''' , lowercase_ ) _UpperCamelCase : List[Any] = hidden_dropout_prob _UpperCamelCase : Optional[Any] = attention_dropout_prob _UpperCamelCase : Union[str, Any] = use_stable_embedding _UpperCamelCase : Optional[int] = shared_input_output_embedding _UpperCamelCase : Dict = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ , ) def _lowercase ( self ) -> str: if self.rope_scaling is None: return if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2: raise ValueError( '''`rope_scaling` must be a dictionary with with two fields,
`name` and `factor`, ''' F'''got {self.rope_scaling}''' ) _UpperCamelCase : List[Any] = self.rope_scaling.get('''type''' , lowercase_ ) _UpperCamelCase : str = self.rope_scaling.get('''factor''' , lowercase_ ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0: raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
683
# Manim scene from the Accelerate "big model inference" docs: draws CPU, GPU
# and model memory blocks, then animates a checkpoint's shards being loaded
# into CPU memory.
#
# NOTE(review): this module was mechanically obfuscated -- every local is
# assigned to the throwaway name `_snake_case`, most call arguments were
# replaced by the undefined global `lowercase_`, and the base class
# `__snake_case` (presumably manim's `Scene` -- confirm) is undefined, so the
# scene cannot render as-is.  Comments describe the apparent intent; the code
# itself is unchanged.
from manim import *


class lowercase_ ( __snake_case ):
    """Animation: loading a sharded checkpoint into CPU memory."""

    def UpperCamelCase ( self ):
        # Building blocks: a memory cell and a smaller translucent "fill".
        _snake_case : Tuple = Rectangle(height=0.5 , width=0.5 )
        _snake_case : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two columns of six memory cells plus a label.
        _snake_case : List[str] = [mem.copy() for i in range(6 )]
        _snake_case : Any = [mem.copy() for i in range(6 )]
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : str = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : int = Text("CPU" , font_size=24 )
        _snake_case : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        cpu.move_to([-2.5, -0.5, 0] )  # NOTE(review): `cpu` is never bound above
        self.add(lowercase_ )
        # GPU: a row of four memory cells plus a label.
        _snake_case : int = [mem.copy() for i in range(4 )]
        _snake_case : Dict = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : str = Text("GPU" , font_size=24 )
        _snake_case : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        gpu.move_to([-1, -1, 0] )  # NOTE(review): `gpu` is never bound above
        self.add(lowercase_ )
        # Model: a row of six memory cells plus a label.
        _snake_case : Any = [mem.copy() for i in range(6 )]
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Dict = Text("Model" , font_size=24 )
        _snake_case : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        model.move_to([3, -1.0, 0] )  # NOTE(review): `model` is never bound above
        self.add(lowercase_ )
        _snake_case : str = []
        # For every model cell, create a small filled target rectangle and
        # chain it next to the previous one, starting at the CPU's first cell.
        for i, rect in enumerate(lowercase_ ):
            rect.set_stroke(lowercase_ )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            _snake_case : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
            self.add(lowercase_ )
            # NOTE(review): `cpu_target` / `cpu_targs` are never bound above.
            cpu_targs.append(lowercase_ )
        # Checkpoint: six cells plus a caption, placed on the right.
        _snake_case : List[Any] = [mem.copy() for i in range(6 )]
        _snake_case : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Optional[Any] = Text("Loaded Checkpoint" , font_size=24 )
        _snake_case : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        # Legend in the top-left corner.
        _snake_case : Optional[int] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        _snake_case : Optional[Any] = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(lowercase_ , lowercase_ )
        _snake_case : Union[str, Any] = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        # Caption describing this step of the walkthrough.
        _snake_case : List[Any] = MarkupText(
            f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(lowercase_ ) , Write(lowercase_ ) )
        self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
        # Animate: grow a filled target over each checkpoint cell, then move a
        # copy of it into the corresponding CPU cell (left column first five,
        # right column afterwards).
        _snake_case : int = []
        _snake_case : str = []
        for i, rect in enumerate(lowercase_ ):
            _snake_case : Dict = fill.copy().set_fill(lowercase_ , opacity=0.7 )
            target.move_to(lowercase_ )
            first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
            _snake_case : Dict = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
        self.play(*lowercase_ )
        self.play(*lowercase_ )
        self.wait()
670
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available A : int = {'tokenization_herbert': ['HerbertTokenizer']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = ['HerbertTokenizerFast'] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys A : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
636
"""Learning-rate schedulers with warmup (diffusers-style `optimization.py`).

Fixes over the obfuscated original: duplicate `__lowercase` parameter names
(a SyntaxError), all enum members bound to a single name, scheduler functions
defined under one shared name but referenced by their real names in
`TYPE_TO_SCHEDULER_FUNCTION`, the undefined `sorted_steps` local in the
piecewise-constant rule function, and the "must be be" typo in an error
message.
"""
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    """Names accepted by `get_scheduler`."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate (multiplier 1 forever)."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Linear warmup from 0 to the initial lr, then constant."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise-constant multiplier from a rule string.

    `step_rules` looks like "1:10,0.1:20,0.01" -- multiplier 1 for steps
    < 10, 0.1 for steps < 20, and 0.01 afterwards (last entry has no step).
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(value)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        # Bind the rules in a closure so LambdaLR can pickle/apply it per step.
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup, then linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup, then cosine decay over `num_cycles` half-waves."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup, then cosine decay with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified factory: build any scheduler from its name.

    Raises ValueError when a scheduler's required argument
    (`num_warmup_steps` / `num_training_steps`) is missing.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
670
0
# End-to-end smoke tests for the PyTorch `*_no_trainer` example scripts,
# launched as subprocesses via `accelerate launch`.
#
# NOTE(review): this module was mechanically obfuscated -- locals are assigned
# to throwaway names (`__UpperCAmelCase`, `lowerCAmelCase`) while later code
# references the original (now undefined) names such as `parser`, `args`,
# `results`, `testargs`, `result`, `tmp_dir`, `epochs`, `stream_handler`,
# `get_results`, `is_cuda_and_apex_available`; the base class `__snake_case`
# (presumably `TestCasePlus` -- confirm) is undefined; and every test method
# is named `A`, so each definition shadows the previous one and only the last
# method survives on the class.  Comments describe intent; code is unchanged.
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock

import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available


logging.basicConfig(level=logging.DEBUG)

lowerCAmelCase = logging.getLogger()


def __SCREAMING_SNAKE_CASE ( ) -> int:
    '''Return the value of the `-f` command-line flag (originally `get_setup_file`).'''
    __UpperCAmelCase : str = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    __UpperCAmelCase : Optional[int] = parser.parse_args()
    return args.f


def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[Any]:
    '''Load `all_results.json` from a training output directory (originally `get_results`).'''
    __UpperCAmelCase : Optional[int] = {}
    __UpperCAmelCase : Union[str, Any] = os.path.join(__lowercase , '''all_results.json''' )
    if os.path.exists(__lowercase ):
        with open(__lowercase , '''r''' ) as f:
            __UpperCAmelCase : Union[str, Any] = json.load(__lowercase )
    else:
        raise ValueError(f"can't find {path}" )
    return results


def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
    '''True when running on CUDA and NVIDIA apex is installed (originally `is_cuda_and_apex_available`).'''
    __UpperCAmelCase : List[Any] = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


# Mirror test output to stdout so it shows up in CI logs.
lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class lowerCamelCase ( __snake_case ):
    '''Runs each example script end-to-end on tiny fixtures and checks its metrics.'''

    @classmethod
    def A( cls):
        '''Create a shared temp dir and a default Accelerate launch config (originally `setUpClass`).'''
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        __UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
        __UpperCAmelCase : List[Any] = os.path.join(cls.tmpdir , '''default_config.yml''')
        write_basic_config(save_location=cls.configPath)
        __UpperCAmelCase : int = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def A( cls):
        '''Remove the shared temp dir (originally `tearDownClass`).'''
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A( self):
        '''run_glue_no_trainer.py on MRPC fixtures; expects eval_accuracy >= 0.75 and checkpoint dirs.'''
        __UpperCAmelCase : str = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase : Dict = F"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        if is_cuda_and_apex_available():
            testargs.append('''--fp16''')
        run_command(self._launch_args + testargs)
        __UpperCAmelCase : Optional[int] = get_results(lowercase_)
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5)
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''epoch_0''')))
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''glue_no_trainer''')))

    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A( self):
        '''run_clm_no_trainer.py on sample text; expects perplexity < 100.'''
        __UpperCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase : Union[str, Any] = F"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs)
        __UpperCAmelCase : List[str] = get_results(lowercase_)
        self.assertLess(result['''perplexity'''] , 1_0_0)
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''epoch_0''')))
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''clm_no_trainer''')))

    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A( self):
        '''run_mlm_no_trainer.py on sample text; expects perplexity < 42.'''
        __UpperCAmelCase : str = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase : List[str] = F"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        __UpperCAmelCase : List[Any] = get_results(lowercase_)
        self.assertLess(result['''perplexity'''] , 4_2)
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''epoch_0''')))
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''mlm_no_trainer''')))

    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A( self):
        '''run_ner_no_trainer.py on CoNLL fixtures; expects accuracy >= 0.75 and train_loss < 0.5.'''
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        __UpperCAmelCase : Optional[Any] = 7 if get_gpu_count() > 1 else 2
        __UpperCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase : Any = F"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        __UpperCAmelCase : List[Any] = get_results(lowercase_)
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5)
        self.assertLess(result['''train_loss'''] , 0.5)
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''epoch_0''')))
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''ner_no_trainer''')))

    @unittest.skip(reason='''Fix me @muellerzr''')
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A( self):
        '''run_qa_no_trainer.py on SQuAD v2 fixtures; currently skipped.'''
        __UpperCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase : str = F"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        __UpperCAmelCase : Tuple = get_results(lowercase_)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result['''eval_f1'''] , 2_8)
        self.assertGreaterEqual(result['''eval_exact'''] , 2_8)
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''epoch_0''')))
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''qa_no_trainer''')))

    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A( self):
        '''run_swag_no_trainer.py on swag fixtures; expects eval_accuracy >= 0.8.'''
        __UpperCAmelCase : Union[str, Any] = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase : Optional[int] = F"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        __UpperCAmelCase : Tuple = get_results(lowercase_)
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8)
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''swag_no_trainer''')))

    @slow
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A( self):
        '''run_summarization_no_trainer.py on xsum fixtures; checks ROUGE floors.'''
        __UpperCAmelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase : Dict = F"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        __UpperCAmelCase : Any = get_results(lowercase_)
        self.assertGreaterEqual(result['''eval_rouge1'''] , 1_0)
        self.assertGreaterEqual(result['''eval_rouge2'''] , 2)
        self.assertGreaterEqual(result['''eval_rougeL'''] , 7)
        self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7)
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''epoch_0''')))
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''summarization_no_trainer''')))

    @slow
    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A( self):
        '''run_translation_no_trainer.py on wmt16 fixtures; expects BLEU >= 30.'''
        __UpperCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase : Any = F"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        __UpperCAmelCase : str = get_results(lowercase_)
        self.assertGreaterEqual(result['''eval_bleu'''] , 3_0)
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''epoch_0''')))
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''translation_no_trainer''')))

    @slow
    def A( self):
        '''run_semantic_segmentation_no_trainer.py on a test dataset; expects overall accuracy >= 0.10.'''
        __UpperCAmelCase : Union[str, Any] = logging.StreamHandler(sys.stdout)
        logger.addHandler(lowercase_)
        __UpperCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase : Tuple = F"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
        run_command(self._launch_args + testargs)
        __UpperCAmelCase : str = get_results(lowercase_)
        self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.1_0)

    @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''})
    def A( self):
        '''run_image_classification_no_trainer.py on cats_vs_dogs sample; expects eval_accuracy >= 0.6.'''
        __UpperCAmelCase : Union[str, Any] = self.get_auto_remove_tmp_dir()
        __UpperCAmelCase : Tuple = F"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
        if is_cuda_and_apex_available():
            testargs.append('''--fp16''')
        run_command(self._launch_args + testargs)
        __UpperCAmelCase : Optional[int] = get_results(lowercase_)
        # The base model scores a 25%
        self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6)
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''step_1''')))
        self.assertTrue(os.path.exists(os.path.join(lowercase_ , '''image_classification_no_trainer''')))
462
"""RoCBert model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    """Configuration class for RoCBert models.

    BERT-style hyper-parameters plus RoCBert's extra pronunciation / shape
    embedding settings.  Unknown kwargs are forwarded to
    `PretrainedConfig.__init__`.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: auxiliary pronunciation / glyph-shape embeddings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
670
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class __UpperCAmelCase( metaclass=__snake_case ): """simple docstring""" __lowerCamelCase = ["flax"] def __init__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) class __UpperCAmelCase( metaclass=__snake_case ): """simple docstring""" __lowerCamelCase = ["flax"] def __init__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) class __UpperCAmelCase( metaclass=__snake_case ): """simple docstring""" __lowerCamelCase = ["flax"] def __init__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) class __UpperCAmelCase( metaclass=__snake_case ): """simple docstring""" __lowerCamelCase = ["flax"] def __init__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) 
class __UpperCAmelCase( metaclass=__snake_case ): """simple docstring""" __lowerCamelCase = ["flax"] def __init__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) class __UpperCAmelCase( metaclass=__snake_case ): """simple docstring""" __lowerCamelCase = ["flax"] def __init__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) class __UpperCAmelCase( metaclass=__snake_case ): """simple docstring""" __lowerCamelCase = ["flax"] def __init__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) class __UpperCAmelCase( metaclass=__snake_case ): """simple docstring""" __lowerCamelCase = ["flax"] def __init__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) class __UpperCAmelCase( metaclass=__snake_case ): """simple docstring""" 
__lowerCamelCase = ["flax"] def __init__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) class __UpperCAmelCase( metaclass=__snake_case ): """simple docstring""" __lowerCamelCase = ["flax"] def __init__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) class __UpperCAmelCase( metaclass=__snake_case ): """simple docstring""" __lowerCamelCase = ["flax"] def __init__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) class __UpperCAmelCase( metaclass=__snake_case ): """simple docstring""" __lowerCamelCase = ["flax"] def __init__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) class __UpperCAmelCase( metaclass=__snake_case ): """simple docstring""" __lowerCamelCase = ["flax"] def __init__( self , *snake_case__ , 
**snake_case__ ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase_ ( cls , *snake_case__ , **snake_case__ ): '''simple docstring''' requires_backends(cls , ["flax"] )
218
# NOTE(review): `cva` looks like an obfuscated/mistyped `cv2` (OpenCV) — confirm the
# actual dependency name before shipping.
from cva import destroyAllWindows, imread, imshow, waitKey


def convert_to_negative(img):
    """Return the photographic negative of ``img``, modifying it in place.

    Each channel value v of every pixel is replaced by 255 - v.

    Fixes over the previous version: the function now actually uses its
    parameter (the old body read a module-level ``img`` and ranged both loops
    over the argument itself), and the broken ``-> Tuple`` annotation
    (``Tuple`` was never imported) is gone.

    :param img: image array of shape (rows, cols, 3), uint8 channels
    :return: the same array with every pixel inverted
    """
    rows, cols = img.shape[0], img.shape[1]
    for i in range(rows):
        for j in range(cols):
            # [255, 255, 255] - pixel inverts all three channels at once.
            img[i][j] = [255, 255, 255] - img[i][j]
    return img


# Backward-compatible alias for the previous (obfuscated) public name.
snake_case = convert_to_negative


if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
670
0
'''simple docstring'''
import unittest

from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin

# Path to the shared SentencePiece vocabulary fixture used by setUp below.
lowerCamelCase_ = get_tests_dir('fixtures/test_sentencepiece.model')


@require_sentencepiece
@require_tokenizers
class lowercase_ ( __snake_case , unittest.TestCase ):
    # NOTE(review): this file is machine-obfuscated. `__snake_case` is undefined here —
    # presumably the imported TokenizerTesterMixin; confirm. All class attributes share
    # the name `lowerCamelCase_` and all test methods share `lowerCAmelCase_`, so later
    # definitions shadow earlier ones. Assignment targets were renamed to
    # `_SCREAMING_SNAKE_CASE` while later references keep the original variable names
    # (e.g. `tokenizer`, `vocab_keys`), and `lowercase_` is used where concrete
    # arguments once stood. Comments below describe the *intended* behavior.
    """simple docstring"""

    lowerCamelCase_ = ReformerTokenizer      # presumably: tokenizer_class
    lowerCamelCase_ = ReformerTokenizerFast  # presumably: rust_tokenizer_class
    lowerCamelCase_ = True                   # presumably: test_rust_tokenizer
    lowerCamelCase_ = False
    lowerCamelCase_ = True

    def lowerCAmelCase_ ( self : Tuple ):
        # setUp: build a slow tokenizer from the fixture vocab and save it to tmpdir.
        """simple docstring"""
        super().setUp()
        _SCREAMING_SNAKE_CASE = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCAmelCase_ ( self : Any ):
        # Round-trip a single token <-> id conversion ("<s>" <-> 1).
        """simple docstring"""
        _SCREAMING_SNAKE_CASE = "<s>"
        _SCREAMING_SNAKE_CASE = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )

    def lowerCAmelCase_ ( self : int ):
        # Spot-check the first/last vocab entries and the vocab length (1000).
        """simple docstring"""
        _SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(lowercase_ ) , 1_0_0_0 )

    def lowerCAmelCase_ ( self : Union[str, Any] ):
        # vocab_size must agree with the fixture model.
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )

    def lowerCAmelCase_ ( self : List[Any] ):
        # Slow vs. fast tokenizer parity: tokenize and encode the same sentence.
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        _SCREAMING_SNAKE_CASE = self.get_tokenizer()
        _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
        _SCREAMING_SNAKE_CASE = "I was born in 92000, and this is falsé."
        _SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowercase_ )
        _SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
        _SCREAMING_SNAKE_CASE = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
        _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
        _SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
        _SCREAMING_SNAKE_CASE = tokenizer.encode(lowercase_ )
        _SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )

    def lowerCAmelCase_ ( self : List[str] , __lowerCamelCase : int=1_5 ):
        # Padding without a pad token must raise for every encode entry point.
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                _SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                # Simple input
                _SCREAMING_SNAKE_CASE = "This is a simple input"
                _SCREAMING_SNAKE_CASE = ["This is a simple input 1", "This is a simple input 2"]
                _SCREAMING_SNAKE_CASE = ("This is a simple input", "This is a pair")
                _SCREAMING_SNAKE_CASE = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
                # Simple input
                self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
                # Simple input
                self.assertRaises(
                    lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
                # Pair input
                self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
                # Pair input
                self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
                # Pair input
                self.assertRaises(
                    lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )

    def lowerCAmelCase_ ( self : List[Any] ):
        # Intentionally skipped in the original test suite.
        """simple docstring"""
        pass

    def lowerCAmelCase_ ( self : List[str] ):
        # Full tokenization round-trip against the fixture vocab, including
        # accent handling (keep_accents) and unknown-character fallback to <unk>.
        """simple docstring"""
        _SCREAMING_SNAKE_CASE = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
        _SCREAMING_SNAKE_CASE = tokenizer.tokenize("This is a test" )
        self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowercase_ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , )
        _SCREAMING_SNAKE_CASE = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            lowercase_ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        _SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowercase_ )
        self.assertListEqual(
            lowercase_ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , )
        _SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowercase_ )
        self.assertListEqual(
            lowercase_ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )

    @cached_property
    def lowerCAmelCase_ ( self : str ):
        # The "big" reference tokenizer loaded from the Hub (network access required).
        """simple docstring"""
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )

    @slow
    def lowerCAmelCase_ ( self : Optional[Any] ):
        # Pin the encoding of a short string against the pretrained tokenizer.
        """simple docstring"""
        _SCREAMING_SNAKE_CASE = "Hello World!"
        _SCREAMING_SNAKE_CASE = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7]
        self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )

    @slow
    def lowerCAmelCase_ ( self : List[str] ):
        # Pin the encoding of a long string with punctuation and out-of-vocab words.
        """simple docstring"""
        _SCREAMING_SNAKE_CASE = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        _SCREAMING_SNAKE_CASE = [
            1_0_8,
            2_6_5,
            2_4,
            1_1_1,
            4,
            2_5_8,
            1_5_6,
            3_5,
            2_8,
            2_7_5,
            3,
            2_5_9,
            2_9_7,
            2_6_0,
            8_4,
            4,
            3_5,
            1_1_0,
            4_4,
            8,
            2_5_9,
            9_1,
            2_6_8,
            2_1,
            1_1,
            2_0_9,
            2_7_4,
            1_0_9,
            2_6_6,
            2_7_7,
            1_1_7,
            8_6,
            9_3,
            3_1_5,
            2_5_8,
            2_7_8,
            2_5_8,
            2_7_7,
            2_5_8,
            0,
            2_5_8,
            2_8_8,
            2_5_8,
            3_1_9,
            2_5_8,
            0,
            2_5_8,
            0,
            2_5_8,
            0,
            2_5_8,
            0,
            2_5_8,
            2_8_7,
            2_5_8,
            3_1_5,
            2_5_8,
            2_8_9,
            2_5_8,
            2_7_8,
            9_9,
            2_6_9,
            2_6_6,
            2_6_2,
            8,
            2_5_9,
            2_4_1,
            4,
            2_1_7,
            2_3_0,
            2_6_8,
            2_6_6,
            5_5,
            1_6_8,
            1_0_6,
            7_5,
            1_9_3,
            2_6_6,
            2_2_3,
            2_7,
            4_9,
            2_6,
            2_8_2,
            2_5,
            2_6_4,
            2_9_9,
            1_9,
            2_6,
            0,
            2_5_8,
            2_7_7,
            1_1_7,
            8_6,
            9_3,
            1_7_6,
            1_8_3,
            2_7_0,
            1_1,
            2_6_2,
            4_2,
            6_1,
            2_6_5,
        ]
        self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )

    @require_torch
    @slow
    def lowerCAmelCase_ ( self : Optional[Any] ):
        # Smoke test: tokenizer output must be consumable by a fresh ReformerModel.
        """simple docstring"""
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        _SCREAMING_SNAKE_CASE = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
        _SCREAMING_SNAKE_CASE = " ".join(lowercase_ )
        _SCREAMING_SNAKE_CASE = self.big_tokenizer.encode_plus(lowercase_ , return_tensors="pt" )
        _SCREAMING_SNAKE_CASE = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
        _SCREAMING_SNAKE_CASE = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        _SCREAMING_SNAKE_CASE = encoded_sequence["input_ids"].shape
        _SCREAMING_SNAKE_CASE = ReformerModel(lowercase_ )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**lowercase_ )
            model(**lowercase_ )

    @slow
    def lowerCAmelCase_ ( self : List[Any] ):
        # Standard integration check against a pinned Hub revision.
        """simple docstring"""
        # fmt: off
        _SCREAMING_SNAKE_CASE = {"input_ids": [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        _SCREAMING_SNAKE_CASE = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase_ , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowercase_ , sequences=lowercase_ , )
418
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple

import numpy as np

from . import residue_constants

# NOTE(review): this module is machine-obfuscated. All three aliases below were
# assigned to the same name `__SCREAMING_SNAKE_CASE` (originally presumably
# FeatureDict, ModelOutput and PICO_TO_ANGSTROM), every function is named
# `snake_case` (so later defs shadow earlier ones), assignment targets were
# renamed to `_snake_case` while later references keep the original names
# (e.g. `tags`, `prot`, `pdb_lines`), and `np.floataa`/`np.intaa`/`Protein`/
# `PICO_TO_ANGSTROM`/`__snake_case`/`get_pdb_headers` are undefined here.
# Comments below describe the *intended* behavior; confirm against the
# un-obfuscated upstream before relying on them.
__SCREAMING_SNAKE_CASE : List[str] = Mapping[str, np.ndarray]
__SCREAMING_SNAKE_CASE : List[Any] = Mapping[str, Any]  # Is a nested dict.
__SCREAMING_SNAKE_CASE : List[Any] = 0.01  # presumably PICO_TO_ANGSTROM


@dataclasses.dataclass(frozen=__snake_case )
class lowercase_ :
    # Protein structure representation. Every field below was renamed to
    # `_lowerCamelCase` by obfuscation; the shape comments give the intent.

    # Cartesian coordinates of atoms in angstroms.
    _lowerCamelCase = 42  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    _lowerCamelCase = 42  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    _lowerCamelCase = 42  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    _lowerCamelCase = 42  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    _lowerCamelCase = 42  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    _lowerCamelCase = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    _lowerCamelCase = None

    # Templates used to generate this protein (prediction-only)
    _lowerCamelCase = None

    # Chain corresponding to each parent
    _lowerCamelCase = None


def snake_case (__lowercase ) -> Protein:
    # Parse a ProteinNet-format string ([PRIMARY]/[TERTIARY]/[MASK] sections)
    # into a Protein. Coordinates are converted from picometers to angstroms.
    '''simple docstring'''
    _snake_case : str = r"(\[[A-Z]+\]\n)"
    _snake_case : List[str] = [tag.strip() for tag in re.split(__lowercase , __lowercase ) if len(__lowercase ) > 0]
    _snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )

    _snake_case : List[str] = ["N", "CA", "C"]
    _snake_case : Any = None
    _snake_case : Union[str, Any] = None
    _snake_case : Optional[int] = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            _snake_case : Tuple = g[1][0].strip()
            for i in range(len(__lowercase ) ):
                if seq[i] not in residue_constants.restypes:
                    _snake_case : Tuple = "X"  # FIXME: strings are immutable
            _snake_case : int = np.array(
                [residue_constants.restype_order.get(__lowercase , residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            # Three lines of whitespace-separated floats: x, y and z coordinates.
            _snake_case : List[List[float]] = []
            for axis in range(3 ):
                tertiary.append(list(map(__lowercase , g[1][axis].split() ) ) )
            _snake_case : Dict = np.array(__lowercase )
            _snake_case : Dict = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
            for i, atom in enumerate(__lowercase ):
                _snake_case : List[Any] = np.transpose(tertiary_np[:, i::3] )
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            # '-' = masked residue, '+' = observed residue.
            _snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
            _snake_case : Any = np.zeros(
                (
                    len(__lowercase ),
                    residue_constants.atom_type_num,
                ) ).astype(np.floataa )
            for i, atom in enumerate(__lowercase ):
                _snake_case : Dict = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=__lowercase , atom_mask=__lowercase , aatype=__lowercase , residue_index=np.arange(len(__lowercase ) ) , b_factors=__lowercase , )


def snake_case (__lowercase , __lowercase = 0 ) -> List[str]:
    # Build PDB header lines (REMARK / PARENT) for the given protein and chain id.
    '''simple docstring'''
    _snake_case : List[str] = []
    _snake_case : Optional[Any] = prot.remark
    if remark is not None:
        pdb_headers.append(F"""REMARK {remark}""" )
    _snake_case : str = prot.parents
    _snake_case : str = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        # Keep only the parents that belong to the requested chain.
        _snake_case : int = [p for i, p in zip(__lowercase , __lowercase ) if i == chain_id]
    if parents is None or len(__lowercase ) == 0:
        _snake_case : Optional[int] = ["N/A"]
    pdb_headers.append(F"""PARENT {' '.join(__lowercase )}""" )
    return pdb_headers


def snake_case (__lowercase , __lowercase ) -> str:
    # Re-insert REMARK/PARENT headers into an existing (possibly multi-chain)
    # PDB string, emitting the right PARENT line at each chain boundary (TER).
    '''simple docstring'''
    _snake_case : List[str] = []
    _snake_case : Optional[int] = pdb_str.split("\n" )
    _snake_case : List[str] = prot.remark
    if remark is not None:
        out_pdb_lines.append(F"""REMARK {remark}""" )
    _snake_case : List[List[str]]
    if prot.parents is not None and len(prot.parents ) > 0:
        _snake_case : str = []
        if prot.parents_chain_index is not None:
            # Group parents by chain index, filling gaps with "N/A".
            _snake_case : Dict[str, List[str]] = {}
            for p, i in zip(prot.parents , prot.parents_chain_index ):
                parent_dict.setdefault(str(__lowercase ) , [] )
                parent_dict[str(__lowercase )].append(__lowercase )
            _snake_case : Any = max([int(__lowercase ) for chain_idx in parent_dict] )
            for i in range(max_idx + 1 ):
                _snake_case : Tuple = parent_dict.get(str(__lowercase ) , ["N/A"] )
                parents_per_chain.append(__lowercase )
        else:
            parents_per_chain.append(list(prot.parents ) )
    else:
        _snake_case : List[str] = [["N/A"]]

    def make_parent_line(__lowercase ) -> str:
        return F"""PARENT {' '.join(__lowercase )}"""

    out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
    _snake_case : int = 0
    for i, l in enumerate(__lowercase ):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(__lowercase )
        if "TER" in l and "END" not in lines[i + 1]:
            # A chain just ended and more records follow: emit the next chain's PARENT.
            chain_counter += 1
            if not chain_counter >= len(__lowercase ):
                _snake_case : Tuple = parents_per_chain[chain_counter]
            else:
                _snake_case : str = ["N/A"]
            out_pdb_lines.append(make_parent_line(__lowercase ) )
    return "\n".join(__lowercase )


def snake_case (__lowercase ) -> str:
    # Serialize a Protein to PDB text: headers, one ATOM record per present
    # atom, TER records at chain ends, then END.
    '''simple docstring'''
    _snake_case : Optional[Any] = residue_constants.restypes + ["X"]

    def res_atoa(__lowercase ) -> str:
        # 1-letter residue code -> 3-letter PDB residue name.
        return residue_constants.restype_atoa.get(restypes[r] , "UNK" )

    _snake_case : Optional[int] = residue_constants.atom_types

    _snake_case : List[str] = []

    _snake_case : Tuple = prot.atom_mask
    _snake_case : List[str] = prot.aatype
    _snake_case : int = prot.atom_positions
    _snake_case : int = prot.residue_index.astype(np.intaa )
    _snake_case : List[Any] = prot.b_factors
    _snake_case : str = prot.chain_index

    if np.any(aatype > residue_constants.restype_num ):
        raise ValueError("Invalid aatypes." )

    _snake_case : Union[str, Any] = get_pdb_headers(__lowercase )
    if len(__lowercase ) > 0:
        pdb_lines.extend(__lowercase )

    _snake_case : Optional[Any] = aatype.shape[0]
    _snake_case : str = 1
    _snake_case : Tuple = 0
    _snake_case : int = string.ascii_uppercase
    _snake_case : Optional[Any] = None
    # Add all atom sites.
    for i in range(__lowercase ):
        _snake_case : Dict = res_atoa(aatype[i] )
        for atom_name, pos, mask, b_factor in zip(__lowercase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
            if mask < 0.5:
                continue
            _snake_case : List[Any] = "ATOM"
            _snake_case : Union[str, Any] = atom_name if len(__lowercase ) == 4 else F""" {atom_name}"""
            _snake_case : str = ""
            _snake_case : str = ""
            _snake_case : Any = 1.00
            _snake_case : str = atom_name[0]  # Protein supports only C, N, O, S, this works.
            _snake_case : Dict = ""
            _snake_case : Any = "A"
            if chain_index is not None:
                _snake_case : List[Any] = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            _snake_case : Optional[int] = (
                F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
                F"""{res_name_a:>3} {chain_tag:>1}"""
                F"""{residue_index[i]:>4}{insertion_code:>1}   """
                F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
                F"""{occupancy:>6.2f}{b_factor:>6.2f}          """
                F"""{element:>2}{charge:>2}"""
            )
            pdb_lines.append(__lowercase )
            atom_index += 1

        _snake_case : Dict = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                _snake_case : Optional[int] = True
                _snake_case : Union[str, Any] = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            _snake_case : List[str] = "TER"
            _snake_case : str = (
                F"""{chain_end:<6}{atom_index:>5}      {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
            )
            pdb_lines.append(__lowercase )
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(__lowercase , __lowercase ) )

    pdb_lines.append("END" )
    pdb_lines.append("" )
    return "\n".join(__lowercase )


def snake_case (__lowercase ) -> np.ndarray:
    # Ideal atom presence mask for each residue type (which atoms a residue has).
    '''simple docstring'''
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def snake_case (__lowercase , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , ) -> Protein:
    # Assemble a Protein from model `result` and input `features`; residue_index
    # is shifted to PDB's 1-based convention, b_factors default to zeros.
    '''simple docstring'''
    return Protein(
        aatype=features["aatype"] , atom_positions=result["final_atom_positions"] , atom_mask=result["final_atom_mask"] , residue_index=features["residue_index"] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) , chain_index=__lowercase , remark=__lowercase , parents=__lowercase , parents_chain_index=__lowercase , )
670
0
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device

enable_full_determinism()


class lowercase ( unittest.TestCase):
    # NOTE(review): machine-obfuscated test file. Both classes below share the name
    # `lowercase`, both methods per class share `_SCREAMING_SNAKE_CASE`, assignment
    # targets were renamed to `UpperCAmelCase_` while later references keep the
    # original names (`model`, `pndm`, `image`, ...), and `lowercase_` /
    # `Optional` / `Tuple` / `List` are undefined here. Comments describe intent.
    """simple docstring"""

    @property
    def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
        # Tiny deterministic UNet used as the pipeline's denoiser in fast tests.
        torch.manual_seed(0 )
        UpperCAmelCase_= UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model

    def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
        # Fast inference test: pipeline output must match a pinned slice, and the
        # tuple return path (return_dict=False) must agree with the dict path.
        UpperCAmelCase_= self.dummy_uncond_unet
        UpperCAmelCase_= PNDMScheduler()
        UpperCAmelCase_= PNDMPipeline(unet=lowercase_ , scheduler=lowercase_ )
        pndm.to(lowercase_ )
        pndm.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase_= torch.manual_seed(0 )
        UpperCAmelCase_= pndm(generator=lowercase_ , num_inference_steps=20 , output_type="""numpy""" ).images
        UpperCAmelCase_= torch.manual_seed(0 )
        UpperCAmelCase_= pndm(generator=lowercase_ , num_inference_steps=20 , output_type="""numpy""" , return_dict=lowercase_ )[0]
        UpperCAmelCase_= image[0, -3:, -3:, -1]
        UpperCAmelCase_= image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCAmelCase_= np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2


@slow
@require_torch
class lowercase ( unittest.TestCase):
    """simple docstring"""

    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
        # Slow integration test against the pretrained google/ddpm-cifar10-32 UNet.
        UpperCAmelCase_= "google/ddpm-cifar10-32"
        UpperCAmelCase_= UNetaDModel.from_pretrained(lowercase_ )
        UpperCAmelCase_= PNDMScheduler()
        UpperCAmelCase_= PNDMPipeline(unet=lowercase_ , scheduler=lowercase_ )
        pndm.to(lowercase_ )
        pndm.set_progress_bar_config(disable=lowercase_ )
        UpperCAmelCase_= torch.manual_seed(0 )
        UpperCAmelCase_= pndm(generator=lowercase_ , output_type="""numpy""" ).images
        UpperCAmelCase_= image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCAmelCase_= np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
593
from copy import deepcopy
from typing import Optional, Union

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available

if is_torch_available():
    import torch
if is_tf_available():
    import tensorflow as tf


class lowercase_ ( __snake_case ):
    # Processor for the SAM (Segment Anything) model: wraps a SamImageProcessor
    # and normalizes prompt points / labels / boxes to the resized image frame.
    # NOTE(review): machine-obfuscated file. `__snake_case` is undefined here —
    # presumably ProcessorMixin (imported above). Both class attributes share the
    # name `_lowerCamelCase` (presumably `attributes` and `image_processor_class`),
    # the private methods all share `UpperCamelCase`, assignment targets were
    # renamed to `_snake_case` while later references keep the original names,
    # parameter lists repeat `lowercase_` (a syntax error as written), and
    # `np.floataa` is undefined. Comments describe the intended behavior.
    _lowerCamelCase = ['image_processor']
    _lowerCamelCase = 'SamImageProcessor'

    def __init__( self , lowercase_ ):
        super().__init__(lowercase_ )
        # Cache the wrapped image processor, the pad value used for ragged point
        # sets (presumably -10), and the target long-edge size it resizes to.
        _snake_case : Optional[Any] = self.image_processor
        _snake_case : Tuple = -10
        _snake_case : str = self.image_processor.size["longest_edge"]

    def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = None , **lowercase_ , ):
        # Preprocess images, then validate and rescale the prompt inputs into the
        # returned BatchEncoding.
        _snake_case : List[Any] = self.image_processor(
            lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
        # pop arguments that are not used in the foward but used nevertheless
        _snake_case : Any = encoding_image_processor["original_sizes"]

        if hasattr(lowercase_ , "numpy" ):  # Checks if Torch or TF tensor
            _snake_case : int = original_sizes.numpy()

        _snake_case ,_snake_case ,_snake_case : Union[str, Any] = self._check_and_preprocess_points(
            input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , )

        _snake_case : Dict = self._normalize_and_convert(
            lowercase_ , lowercase_ , input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , return_tensors=lowercase_ , )

        return encoding_image_processor

    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="pt" , ):
        # Rescale points/boxes from original image coordinates to the resized
        # frame, pad ragged point sets, and convert everything to pt/tf tensors
        # with an extra batch dimension when needed.
        if input_points is not None:
            if len(lowercase_ ) != len(lowercase_ ):
                # One original size for the whole batch.
                _snake_case : int = [
                    self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] ) for point in input_points
                ]
            else:
                _snake_case : Dict = [
                    self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ )
                    for point, original_size in zip(lowercase_ , lowercase_ )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    _snake_case ,_snake_case : int = self._pad_points_and_labels(lowercase_ , lowercase_ )

            _snake_case : Any = np.array(lowercase_ )

        if input_labels is not None:
            _snake_case : Optional[Any] = np.array(lowercase_ )

        if input_boxes is not None:
            if len(lowercase_ ) != len(lowercase_ ):
                _snake_case : Optional[Any] = [
                    self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] , is_bounding_box=lowercase_ )
                    for box in input_boxes
                ]
            else:
                _snake_case : List[str] = [
                    self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ , is_bounding_box=lowercase_ )
                    for box, original_size in zip(lowercase_ , lowercase_ )
                ]
            _snake_case : Tuple = np.array(lowercase_ )

        if input_boxes is not None:
            if return_tensors == "pt":
                _snake_case : List[str] = torch.from_numpy(lowercase_ )
                # boxes batch size of 1 by default
                _snake_case : Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                _snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
                # boxes batch size of 1 by default
                _snake_case : Optional[int] = tf.expand_dims(lowercase_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                _snake_case : Tuple = torch.from_numpy(lowercase_ )
                # point batch size of 1 by default
                _snake_case : int = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                _snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
                # point batch size of 1 by default
                _snake_case : Tuple = tf.expand_dims(lowercase_ , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                _snake_case : Dict = torch.from_numpy(lowercase_ )
                # point batch size of 1 by default
                _snake_case : str = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                _snake_case : Optional[Any] = tf.convert_to_tensor(lowercase_ )
                # point batch size of 1 by default
                _snake_case : List[Any] = tf.expand_dims(lowercase_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels} )

        return encoding_image_processor

    def UpperCamelCase ( self , lowercase_ , lowercase_ ):
        # Pad every point set (and its labels) up to the largest set in the batch
        # using self.point_pad_value so they stack into one rectangular array.
        _snake_case : List[Any] = max([point.shape[0] for point in input_points] )
        _snake_case : List[str] = []
        for i, point in enumerate(lowercase_ ):
            if point.shape[0] != expected_nb_points:
                _snake_case : Optional[Any] = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                _snake_case : Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(lowercase_ )
        _snake_case : Optional[Any] = processed_input_points
        return input_points, input_labels

    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False ):
        # Scale (x, y) coordinates from the original image size to the model's
        # preprocessing size; boxes are reshaped (-1, 2, 2) so both corners scale.
        _snake_case ,_snake_case : Optional[int] = original_size
        _snake_case ,_snake_case : List[str] = self.image_processor._get_preprocess_shape(lowercase_ , longest_edge=lowercase_ )
        _snake_case : Optional[Any] = deepcopy(lowercase_ ).astype(lowercase_ )

        if is_bounding_box:
            _snake_case : str = coords.reshape(-1 , 2 , 2 )

        _snake_case : Optional[Any] = coords[..., 0] * (new_w / old_w)
        _snake_case : Dict = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            _snake_case : Optional[Any] = coords.reshape(-1 , 4 )

        return coords

    def UpperCamelCase ( self , lowercase_=None , lowercase_=None , lowercase_=None , ):
        # Validate prompt inputs: accept framework tensors (converted via .numpy()),
        # require nested-list structure, and convert leaves to numpy arrays.
        if input_points is not None:
            if hasattr(lowercase_ , "numpy" ):  # Checks for TF or Torch tensor
                _snake_case : Union[str, Any] = input_points.numpy().tolist()

            if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_points[0] , lowercase_ ):
                raise ValueError("Input points must be a list of list of floating points." )
            _snake_case : Any = [np.array(lowercase_ ) for input_point in input_points]
        else:
            _snake_case : Optional[int] = None

        if input_labels is not None:
            if hasattr(lowercase_ , "numpy" ):
                _snake_case : Tuple = input_labels.numpy().tolist()

            if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_labels[0] , lowercase_ ):
                raise ValueError("Input labels must be a list of list integers." )
            _snake_case : Tuple = [np.array(lowercase_ ) for label in input_labels]
        else:
            _snake_case : Optional[Any] = None

        if input_boxes is not None:
            if hasattr(lowercase_ , "numpy" ):
                _snake_case : List[str] = input_boxes.numpy().tolist()

            if (
                not isinstance(lowercase_ , lowercase_ )
                or not isinstance(input_boxes[0] , lowercase_ )
                or not isinstance(input_boxes[0][0] , lowercase_ )
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points." )
            _snake_case : List[Any] = [np.array(lowercase_ ).astype(np.floataa ) for box in input_boxes]
        else:
            _snake_case : Optional[int] = None

        return input_points, input_labels, input_boxes

    @property
    def UpperCamelCase ( self ):
        # De-duplicated model input names, delegated to the image processor.
        _snake_case : Union[str, Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(lowercase_ ) )

    def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
        # Thin delegation to the image processor's mask post-processing.
        return self.image_processor.post_process_masks(*lowercase_ , **lowercase_ )
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Lazy-import structure: maps each submodule to the public names it provides.
# Fix: the previous version assigned the dict and every backend list to the
# same name (`A_`), clobbering the dict, while the final _LazyModule call
# referenced `_import_structure` — which was never defined, so importing this
# module raised NameError. The backend lists are now registered under their
# submodule keys, and the lazy module is installed in sys.modules.
_import_structure = {
    'configuration_roberta_prelayernorm': [
        'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'RobertaPreLayerNormConfig',
        'RobertaPreLayerNormOnnxConfig',
    ],
}

# Each backend's symbols are only advertised when that backend is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_roberta_prelayernorm'] = [
        'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RobertaPreLayerNormForCausalLM',
        'RobertaPreLayerNormForMaskedLM',
        'RobertaPreLayerNormForMultipleChoice',
        'RobertaPreLayerNormForQuestionAnswering',
        'RobertaPreLayerNormForSequenceClassification',
        'RobertaPreLayerNormForTokenClassification',
        'RobertaPreLayerNormModel',
        'RobertaPreLayerNormPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_roberta_prelayernorm'] = [
        'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFRobertaPreLayerNormForCausalLM',
        'TFRobertaPreLayerNormForMaskedLM',
        'TFRobertaPreLayerNormForMultipleChoice',
        'TFRobertaPreLayerNormForQuestionAnswering',
        'TFRobertaPreLayerNormForSequenceClassification',
        'TFRobertaPreLayerNormForTokenClassification',
        'TFRobertaPreLayerNormMainLayer',
        'TFRobertaPreLayerNormModel',
        'TFRobertaPreLayerNormPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_roberta_prelayernorm'] = [
        'FlaxRobertaPreLayerNormForCausalLM',
        'FlaxRobertaPreLayerNormForMaskedLM',
        'FlaxRobertaPreLayerNormForMultipleChoice',
        'FlaxRobertaPreLayerNormForQuestionAnswering',
        'FlaxRobertaPreLayerNormForSequenceClassification',
        'FlaxRobertaPreLayerNormForTokenClassification',
        'FlaxRobertaPreLayerNormModel',
        'FlaxRobertaPreLayerNormPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    # Install the lazy module so attribute access triggers on-demand imports
    # (previously `import sys` was unused and the lazy module never installed).
    sys.modules[__name__] = A_
393
def snake_case (__lowercase ) -> int: '''simple docstring''' if not grid or not grid[0]: raise TypeError("The grid does not contain the appropriate information" ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] _snake_case : Union[str, Any] = grid[0] for row_n in range(1 , len(__lowercase ) ): _snake_case : Union[str, Any] = grid[row_n] _snake_case : List[Any] = fill_row(__lowercase , __lowercase ) _snake_case : List[Any] = grid[row_n] return grid[-1][-1] def snake_case (__lowercase , __lowercase ) -> list: '''simple docstring''' current_row[0] += row_above[0] for cell_n in range(1 , len(__lowercase ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
670
0
'''simple docstring'''
# NOTE(review): identifiers in this module look machine-mangled: every class
# attribute is `_A`, every method is `__lowerCamelCase` (later defs shadow
# earlier ones), assignments bind `__UpperCAmelCase` while the following reads
# use the original variable names, one parameter list repeats `__A`
# (a SyntaxError as written), and class-level annotations reference `int` /
# `Tuple` / `Optional` / `Any` without importing `typing`.  The code is
# reproduced unchanged below; only comments were added.  Restoring the real
# names requires the upstream diffusers test file.
import gc
import random
import unittest

import numpy as np
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNetaDConditionModel,
    UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps

from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class UpperCAmelCase(__snake_case, unittest.TestCase):
    """Fast (CPU, tiny-model) tests for UnCLIPImageVariationPipeline.

    NOTE(review): the first base class `__snake_case` is undefined here —
    presumably PipelineTesterMixin (imported above); confirm upstream.
    """

    # NOTE(review): all five attributes share the name `_A`, so only the last
    # assignment survives at runtime; originally these were distinct fields
    # (pipeline_class, params, batch_params, required_optional_params, ...).
    _A : int = UnCLIPImageVariationPipeline
    _A : Tuple = IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""}
    _A : Optional[int] = IMAGE_VARIATION_BATCH_PARAMS
    _A : Any = [
        """generator""",
        """return_dict""",
        """decoder_num_inference_steps""",
        """super_res_num_inference_steps""",
    ]
    _A : str = False

    # NOTE(review): the methods below read properties such as
    # `self.text_embedder_hidden_size`, `self.time_input_dim`,
    # `self.dummy_decoder` etc. that the shadowed `__lowerCamelCase`
    # definitions were evidently meant to provide under those names.
    @property
    def __lowerCamelCase(self):
        # tiny embedder hidden size — presumably `text_embedder_hidden_size`
        return 32

    @property
    def __lowerCamelCase(self):
        # presumably `time_input_dim`
        return 32

    @property
    def __lowerCamelCase(self):
        # presumably `block_out_channels_a`
        return self.time_input_dim

    @property
    def __lowerCamelCase(self):
        # presumably `time_embed_dim`
        return self.time_input_dim * 4

    @property
    def __lowerCamelCase(self):
        # presumably `cross_attention_dim`
        return 100

    @property
    def __lowerCamelCase(self):
        # presumably `dummy_tokenizer`
        __UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer

    @property
    def __lowerCamelCase(self):
        # presumably `dummy_text_encoder`
        torch.manual_seed(0)
        __UpperCAmelCase = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        return CLIPTextModelWithProjection(lowercase_)

    @property
    def __lowerCamelCase(self):
        # presumably `dummy_image_encoder`
        torch.manual_seed(0)
        __UpperCAmelCase = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            num_hidden_layers=5,
            num_attention_heads=4,
            image_size=32,
            intermediate_size=37,
            patch_size=1,
        )
        return CLIPVisionModelWithProjection(lowercase_)

    @property
    def __lowerCamelCase(self):
        # presumably `dummy_text_proj`
        torch.manual_seed(0)
        __UpperCAmelCase = {
            "clip_embeddings_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "cross_attention_dim": self.cross_attention_dim,
        }
        __UpperCAmelCase = UnCLIPTextProjModel(**lowercase_)
        return model

    @property
    def __lowerCamelCase(self):
        # presumably `dummy_decoder`
        torch.manual_seed(0)
        __UpperCAmelCase = {
            "sample_size": 32,
            # RGB in channels
            "in_channels": 3,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 6,
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": "identity",
        }
        __UpperCAmelCase = UNetaDConditionModel(**lowercase_)
        return model

    @property
    def __lowerCamelCase(self):
        # presumably `dummy_super_res_kwargs`
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "in_channels": 6,
            "out_channels": 3,
        }

    @property
    def __lowerCamelCase(self):
        # presumably `dummy_super_res_first`
        torch.manual_seed(0)
        __UpperCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs)
        return model

    @property
    def __lowerCamelCase(self):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)
        __UpperCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs)
        return model

    def __lowerCamelCase(self):
        # presumably `get_dummy_components` (read by the test methods below)
        __UpperCAmelCase = self.dummy_decoder
        __UpperCAmelCase = self.dummy_text_proj
        __UpperCAmelCase = self.dummy_text_encoder
        __UpperCAmelCase = self.dummy_tokenizer
        __UpperCAmelCase = self.dummy_super_res_first
        __UpperCAmelCase = self.dummy_super_res_last
        __UpperCAmelCase = UnCLIPScheduler(
            variance_type='learned_range',
            prediction_type='epsilon',
            num_train_timesteps=1_000,
        )
        __UpperCAmelCase = UnCLIPScheduler(
            variance_type='fixed_small_log',
            prediction_type='epsilon',
            num_train_timesteps=1_000,
        )
        __UpperCAmelCase = CLIPImageProcessor(crop_size=32, size=32)
        __UpperCAmelCase = self.dummy_image_encoder
        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }

    def __lowerCamelCase(self, __A, __A=0, __A=True):
        # NOTE(review): the parameter list repeats `__A` (SyntaxError as
        # written); originally presumably (device, seed=0, pil_image=True).
        __UpperCAmelCase = floats_tensor((1, 3, 32, 32), rng=random.Random(lowercase_)).to(lowercase_)
        if str(lowercase_).startswith('mps'):
            __UpperCAmelCase = torch.manual_seed(lowercase_)
        else:
            __UpperCAmelCase = torch.Generator(device=lowercase_).manual_seed(lowercase_)
        if pil_image:
            # Convert the [-1, 1] tensor to a PIL image.
            __UpperCAmelCase = input_image * 0.5 + 0.5
            __UpperCAmelCase = input_image.clamp(0, 1)
            __UpperCAmelCase = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            __UpperCAmelCase = DiffusionPipeline.numpy_to_pil(lowercase_)[0]
        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }

    def __lowerCamelCase(self):
        # Smoke test: single image input, compares a 3x3 output slice.
        __UpperCAmelCase = "cpu"
        __UpperCAmelCase = self.get_dummy_components()
        __UpperCAmelCase = self.pipeline_class(**lowercase_)
        __UpperCAmelCase = pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        __UpperCAmelCase = self.get_dummy_inputs(lowercase_, pil_image=lowercase_)
        __UpperCAmelCase = pipe(**lowercase_)
        __UpperCAmelCase = output.images
        __UpperCAmelCase = self.get_dummy_inputs(lowercase_, pil_image=lowercase_)
        __UpperCAmelCase = pipe(
            **lowercase_,
            return_dict=lowercase_,
        )[0]
        __UpperCAmelCase = image[0, -3:, -3:, -1]
        __UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        __UpperCAmelCase = np.array(
            [
                0.9_9_9_7,
                0.0_0_0_2,
                0.9_9_9_7,
                0.9_9_9_7,
                0.9_9_6_9,
                0.0_0_2_3,
                0.9_9_9_7,
                0.9_9_6_9,
                0.9_9_7_0,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2

    def __lowerCamelCase(self):
        # Same smoke test with the other pil_image flag value.
        __UpperCAmelCase = "cpu"
        __UpperCAmelCase = self.get_dummy_components()
        __UpperCAmelCase = self.pipeline_class(**lowercase_)
        __UpperCAmelCase = pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        __UpperCAmelCase = self.get_dummy_inputs(lowercase_, pil_image=lowercase_)
        __UpperCAmelCase = pipe(**lowercase_)
        __UpperCAmelCase = output.images
        __UpperCAmelCase = self.get_dummy_inputs(lowercase_, pil_image=lowercase_)
        __UpperCAmelCase = pipe(
            **lowercase_,
            return_dict=lowercase_,
        )[0]
        __UpperCAmelCase = image[0, -3:, -3:, -1]
        __UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        __UpperCAmelCase = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2

    def __lowerCamelCase(self):
        # Batched (two-image) variant of the smoke test.
        __UpperCAmelCase = "cpu"
        __UpperCAmelCase = self.get_dummy_components()
        __UpperCAmelCase = self.pipeline_class(**lowercase_)
        __UpperCAmelCase = pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        __UpperCAmelCase = self.get_dummy_inputs(lowercase_, pil_image=lowercase_)
        __UpperCAmelCase = [
            pipeline_inputs["image"],
            pipeline_inputs["image"],
        ]
        __UpperCAmelCase = pipe(**lowercase_)
        __UpperCAmelCase = output.images
        __UpperCAmelCase = self.get_dummy_inputs(lowercase_, pil_image=lowercase_)
        __UpperCAmelCase = [
            tuple_pipeline_inputs["image"],
            tuple_pipeline_inputs["image"],
        ]
        __UpperCAmelCase = pipe(
            **lowercase_,
            return_dict=lowercase_,
        )[0]
        __UpperCAmelCase = image[0, -3:, -3:, -1]
        __UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 64, 64, 3)
        __UpperCAmelCase = np.array(
            [
                0.9_9_9_7,
                0.9_9_8_9,
                0.0_0_0_8,
                0.0_0_2_1,
                0.9_9_6_0,
                0.0_0_1_8,
                0.0_0_1_4,
                0.0_0_0_2,
                0.9_9_3_3,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2

    def __lowerCamelCase(self):
        # Passing pre-made latents / image embeddings must match the
        # image-input path exactly.
        __UpperCAmelCase = torch.device('cpu')

        class UpperCAmelCase:
            # NOTE(review): instantiated below as `DummyScheduler()` — the
            # class name was mangled; `_A` was presumably `init_noise_sigma`.
            _A : Optional[Any] = 1

        __UpperCAmelCase = self.get_dummy_components()
        __UpperCAmelCase = self.pipeline_class(**lowercase_)
        __UpperCAmelCase = pipe.to(lowercase_)
        pipe.set_progress_bar_config(disable=lowercase_)
        __UpperCAmelCase = torch.Generator(device=lowercase_).manual_seed(0)
        __UpperCAmelCase = pipe.decoder.dtype
        __UpperCAmelCase = 1
        __UpperCAmelCase = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        __UpperCAmelCase = pipe.prepare_latents(
            lowercase_, dtype=lowercase_, device=lowercase_, generator=lowercase_, latents=lowercase_, scheduler=DummyScheduler()
        )
        __UpperCAmelCase = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        __UpperCAmelCase = pipe.prepare_latents(
            lowercase_, dtype=lowercase_, device=lowercase_, generator=lowercase_, latents=lowercase_, scheduler=DummyScheduler()
        )
        __UpperCAmelCase = self.get_dummy_inputs(lowercase_, pil_image=lowercase_)
        __UpperCAmelCase = pipe(
            **lowercase_, decoder_latents=lowercase_, super_res_latents=lowercase_
        ).images
        __UpperCAmelCase = self.get_dummy_inputs(lowercase_, pil_image=lowercase_)
        # Don't pass image, instead pass embedding
        __UpperCAmelCase = pipeline_inputs.pop('image')
        __UpperCAmelCase = pipe.image_encoder(lowercase_).image_embeds
        __UpperCAmelCase = pipe(
            **lowercase_,
            decoder_latents=lowercase_,
            super_res_latents=lowercase_,
            image_embeddings=lowercase_,
        ).images
        # make sure passing text embeddings manually is identical
        assert np.abs(img_out_a - img_out_a).max() < 1E-4

    @skip_mps
    def __lowerCamelCase(self):
        __UpperCAmelCase = torch_device == "cpu"
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        __UpperCAmelCase = 1E-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=lowercase_, expected_max_diff=lowercase_
        )

    @skip_mps
    def __lowerCamelCase(self):
        __UpperCAmelCase = torch_device == "cpu"
        __UpperCAmelCase = True
        __UpperCAmelCase = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=lowercase_,
            relax_max_difference=lowercase_,
            additional_params_copy_to_batched_inputs=lowercase_,
        )

    def __lowerCamelCase(self):
        __UpperCAmelCase = [
            "decoder_num_inference_steps",
            "super_res_num_inference_steps",
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            __UpperCAmelCase = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=lowercase_,
                additional_params_copy_to_batched_inputs=lowercase_,
            )
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=lowercase_
            )

    @skip_mps
    def __lowerCamelCase(self):
        # mps override — delegate to the mixin implementation
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def __lowerCamelCase(self):
        return super().test_save_load_local()

    @skip_mps
    def __lowerCamelCase(self):
        return super().test_save_load_optional_components()


@slow
@require_torch_gpu
class UpperCAmelCase(unittest.TestCase):
    """Slow GPU integration test against the released karlo checkpoint."""

    def __lowerCamelCase(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __lowerCamelCase(self):
        __UpperCAmelCase = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png'
        )
        __UpperCAmelCase = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/unclip/karlo_v1_alpha_cat_variation_fp16.npy'
        )
        __UpperCAmelCase = UnCLIPImageVariationPipeline.from_pretrained(
            'kakaobrain/karlo-v1-alpha-image-variations', torch_dtype=torch.floataa
        )
        __UpperCAmelCase = pipeline.to(lowercase_)
        pipeline.set_progress_bar_config(disable=lowercase_)
        __UpperCAmelCase = torch.Generator(device='cpu').manual_seed(0)
        __UpperCAmelCase = pipeline(
            lowercase_,
            generator=lowercase_,
            output_type='np',
        )
        __UpperCAmelCase = output.images[0]
        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(lowercase_, lowercase_, 15)
126
import random def snake_case (__lowercase , __lowercase ) -> tuple: '''simple docstring''' _snake_case ,_snake_case ,_snake_case : List[Any] = [], [], [] for element in data: if element < pivot: less.append(__lowercase ) elif element > pivot: greater.append(__lowercase ) else: equal.append(__lowercase ) return less, equal, greater def snake_case (__lowercase , __lowercase ) -> List[Any]: '''simple docstring''' if index >= len(__lowercase ) or index < 0: return None _snake_case : Any = items[random.randint(0 , len(__lowercase ) - 1 )] _snake_case : Tuple = 0 _snake_case ,_snake_case ,_snake_case : Tuple = _partition(__lowercase , __lowercase ) _snake_case : Tuple = len(__lowercase ) _snake_case : List[str] = len(__lowercase ) # index is the pivot if m <= index < m + count: return pivot # must be in smaller elif m > index: return quick_select(__lowercase , __lowercase ) # must be in larger else: return quick_select(__lowercase , index - (m + count) )
670
0
"""simple docstring""" import os from math import logaa def __UpperCamelCase ( SCREAMING_SNAKE_CASE = "base_exp.txt" ) -> int: """simple docstring""" __snake_case = 0 __snake_case = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(__lowercase ) , __lowercase ) ) ): __snake_case = list(map(__lowercase , line.split("," ) ) ) if x * logaa(__lowercase ) > largest: __snake_case = x * logaa(__lowercase ) __snake_case = i + 1 return result if __name__ == "__main__": print(solution())
163
from math import pow, sqrt def snake_case (*__lowercase ) -> bool: '''simple docstring''' _snake_case : str = len(__lowercase ) > 0 and all(value > 0.0 for value in values ) return result def snake_case (__lowercase , __lowercase ) -> float | ValueError: '''simple docstring''' return ( round(sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(__lowercase , __lowercase ) else ValueError("Input Error: Molar mass values must greater than 0." ) ) def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError: '''simple docstring''' return ( round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(__lowercase , __lowercase , __lowercase ) else ValueError( "Input Error: Molar mass and effusion rate values must greater than 0." ) ) def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError: '''simple docstring''' return ( round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(__lowercase , __lowercase , __lowercase ) else ValueError( "Input Error: Molar mass and effusion rate values must greater than 0." ) ) def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError: '''simple docstring''' return ( round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 ) if validate(__lowercase , __lowercase , __lowercase ) else ValueError( "Input Error: Molar mass and effusion rate values must greater than 0." ) ) def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError: '''simple docstring''' return ( round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 ) if validate(__lowercase , __lowercase , __lowercase ) else ValueError( "Input Error: Molar mass and effusion rate values must greater than 0." ) )
670
0
"""Flax auto-model classes and their model-name mappings.

NOTE(review): in the reviewed chunk every mapping was bound to the same
mangled name (`_lowerCAmelCase`) and every class was named `_UpperCAmelCase`,
so each assignment clobbered the previous one while the `_LazyAutoMapping`
and `auto_class_update` call sites referenced undefined names.  All names
below are restored from those very call sites (the `FLAX_MODEL_..._NAMES`
arguments and the `auto_class_update(FlaxAutoModel...)` arguments); the class
attribute is restored as `_model_mapping`, which `_BaseAutoModelClass`
consumes — confirm against the auto_factory module.
"""
from collections import OrderedDict

from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES


logger = logging.get_logger(__name__)


FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ("albert", "FlaxAlbertModel"),
        ("bart", "FlaxBartModel"),
        ("beit", "FlaxBeitModel"),
        ("bert", "FlaxBertModel"),
        ("big_bird", "FlaxBigBirdModel"),
        ("blenderbot", "FlaxBlenderbotModel"),
        ("blenderbot-small", "FlaxBlenderbotSmallModel"),
        ("clip", "FlaxCLIPModel"),
        ("distilbert", "FlaxDistilBertModel"),
        ("electra", "FlaxElectraModel"),
        ("gpt-sw3", "FlaxGPT2Model"),
        ("gpt2", "FlaxGPT2Model"),
        ("gpt_neo", "FlaxGPTNeoModel"),
        ("gptj", "FlaxGPTJModel"),
        ("longt5", "FlaxLongT5Model"),
        ("marian", "FlaxMarianModel"),
        ("mbart", "FlaxMBartModel"),
        ("mt5", "FlaxMT5Model"),
        ("opt", "FlaxOPTModel"),
        ("pegasus", "FlaxPegasusModel"),
        ("regnet", "FlaxRegNetModel"),
        ("resnet", "FlaxResNetModel"),
        ("roberta", "FlaxRobertaModel"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
        ("roformer", "FlaxRoFormerModel"),
        ("t5", "FlaxT5Model"),
        ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
        ("vit", "FlaxViTModel"),
        ("wav2vec2", "FlaxWav2Vec2Model"),
        ("whisper", "FlaxWhisperModel"),
        ("xglm", "FlaxXGLMModel"),
        ("xlm-roberta", "FlaxXLMRobertaModel"),
    ]
)

FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ("albert", "FlaxAlbertForPreTraining"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForPreTraining"),
        ("big_bird", "FlaxBigBirdForPreTraining"),
        ("electra", "FlaxElectraForPreTraining"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("t5", "FlaxT5ForConditionalGeneration"),
        ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ("albert", "FlaxAlbertForMaskedLM"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForMaskedLM"),
        ("big_bird", "FlaxBigBirdForMaskedLM"),
        ("distilbert", "FlaxDistilBertForMaskedLM"),
        ("electra", "FlaxElectraForMaskedLM"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)

FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("bart", "FlaxBartForConditionalGeneration"),
        ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
        ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
        ("encoder-decoder", "FlaxEncoderDecoderModel"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("marian", "FlaxMarianMTModel"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("pegasus", "FlaxPegasusForConditionalGeneration"),
        ("t5", "FlaxT5ForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ("beit", "FlaxBeitForImageClassification"),
        ("regnet", "FlaxRegNetForImageClassification"),
        ("resnet", "FlaxResNetForImageClassification"),
        ("vit", "FlaxViTForImageClassification"),
    ]
)

FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
    ]
)

FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ("bart", "FlaxBartForCausalLM"),
        ("bert", "FlaxBertForCausalLM"),
        ("big_bird", "FlaxBigBirdForCausalLM"),
        ("electra", "FlaxElectraForCausalLM"),
        ("gpt-sw3", "FlaxGPT2LMHeadModel"),
        ("gpt2", "FlaxGPT2LMHeadModel"),
        ("gpt_neo", "FlaxGPTNeoForCausalLM"),
        ("gptj", "FlaxGPTJForCausalLM"),
        ("opt", "FlaxOPTForCausalLM"),
        ("roberta", "FlaxRobertaForCausalLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
        ("xglm", "FlaxXGLMForCausalLM"),
        ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
    ]
)

FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("albert", "FlaxAlbertForSequenceClassification"),
        ("bart", "FlaxBartForSequenceClassification"),
        ("bert", "FlaxBertForSequenceClassification"),
        ("big_bird", "FlaxBigBirdForSequenceClassification"),
        ("distilbert", "FlaxDistilBertForSequenceClassification"),
        ("electra", "FlaxElectraForSequenceClassification"),
        ("mbart", "FlaxMBartForSequenceClassification"),
        ("roberta", "FlaxRobertaForSequenceClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
        ("roformer", "FlaxRoFormerForSequenceClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
    ]
)

FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ("albert", "FlaxAlbertForQuestionAnswering"),
        ("bart", "FlaxBartForQuestionAnswering"),
        ("bert", "FlaxBertForQuestionAnswering"),
        ("big_bird", "FlaxBigBirdForQuestionAnswering"),
        ("distilbert", "FlaxDistilBertForQuestionAnswering"),
        ("electra", "FlaxElectraForQuestionAnswering"),
        ("mbart", "FlaxMBartForQuestionAnswering"),
        ("roberta", "FlaxRobertaForQuestionAnswering"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
        ("roformer", "FlaxRoFormerForQuestionAnswering"),
        ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
    ]
)

FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ("albert", "FlaxAlbertForTokenClassification"),
        ("bert", "FlaxBertForTokenClassification"),
        ("big_bird", "FlaxBigBirdForTokenClassification"),
        ("distilbert", "FlaxDistilBertForTokenClassification"),
        ("electra", "FlaxElectraForTokenClassification"),
        ("roberta", "FlaxRobertaForTokenClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
        ("roformer", "FlaxRoFormerForTokenClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
    ]
)

FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("albert", "FlaxAlbertForMultipleChoice"),
        ("bert", "FlaxBertForMultipleChoice"),
        ("big_bird", "FlaxBigBirdForMultipleChoice"),
        ("distilbert", "FlaxDistilBertForMultipleChoice"),
        ("electra", "FlaxElectraForMultipleChoice"),
        ("roberta", "FlaxRobertaForMultipleChoice"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
        ("roformer", "FlaxRoFormerForMultipleChoice"),
        ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
    ]
)

FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ("bert", "FlaxBertForNextSentencePrediction"),
    ]
)

FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
    ]
)

FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("whisper", "FlaxWhisperForAudioClassification"),
    ]
)

FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeqaSeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVisionaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeqaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
569
import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) class lowercase_ ( __snake_case ): def __init__( self , *lowercase_ , **lowercase_ ): warnings.warn( "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use LayoutLMv2ImageProcessor instead." , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_ )
670
0
# Extended tests for the translation example's Seq2SeqTrainer integration:
# runs run_translation.py end-to-end (optionally under torch.distributed) and
# checks logged metrics.
#
# NOTE(review): this module appears mechanically renamed — distinct identifiers
# were collapsed into shared placeholders (`A_`, `lowercase__`, `_UpperCAmelCase`,
# `a_`, `lowercase_`).  Several signatures therefore repeat a parameter name
# (a SyntaxError), bodies reference names the garbled signatures no longer bind
# (`eval_steps`, `max_len`, `bindir`, ...), and `Optional` is used in an
# annotation without being imported.  Code is left byte-identical; comments
# document apparent intent only — verify each name against upstream.
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch

from parameterized import parameterized

from transformers.testing_utils import (
    CaptureStderr,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    get_torch_dist_unique_port,
    require_apex,
    require_bitsandbytes,
    require_fairscale,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    require_torch_non_multi_gpu,
    slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed

# NOTE(review): `A_` is assigned three times below — presumably three distinct
# module constants originally (a bindir path and two model names).
A_ : int = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'''{bindir}/../../examples/pytorch/translation'''):
    from run_translation import main  # noqa

set_seed(42)
A_ : Optional[int] = 'sshleifer/student_marian_en_ro_6_1'
A_ : int = 'sshleifer/tiny-mbart'


@require_torch
class lowercase_ ( __snake_case):
    """Integration tests driving run_translation.py through Seq2SeqTrainer."""

    # Quick smoke helper: one tiny fine-tune step, then sanity-check the logged
    # eval metrics (bleu present when generating, eval_loss not NaN).
    def lowercase__ ( self , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , ):
        """simple docstring"""
        a_ = self.run_trainer(
            eval_steps=1 , max_len=12 , model_name=lowercase_ , num_train_epochs=1 , distributed=lowercase_ , extra_args_str=lowercase_ , predict_with_generate=lowercase_ , do_train=lowercase_ , do_eval=lowercase_ , do_predict=lowercase_ , )
        a_ = TrainerState.load_from_json(os.path.join(lowercase_ , """trainer_state.json""" ) ).log_history
        if not do_eval:
            return
        a_ = [log for log in logs if "eval_loss" in log.keys()]
        a_ = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
        a_ = eval_metrics[-1]
        assert isinstance(last_step_stats["""eval_bleu"""] , lowercase_ )
        assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"

    @require_torch_non_multi_gpu
    def lowercase__ ( self ):
        """simple docstring"""
        self.run_seqaseq_quick()

    # NOTE(review): the two multi-gpu variants below presumably passed
    # distributed=False (DP) and distributed=True (DDP) respectively before the
    # arguments were garbled to `lowercase_`.
    @require_torch_multi_gpu
    def lowercase__ ( self ):
        """simple docstring"""
        self.run_seqaseq_quick(distributed=lowercase_ )

    @require_torch_multi_gpu
    def lowercase__ ( self ):
        """simple docstring"""
        self.run_seqaseq_quick(distributed=lowercase_ )

    @unittest.skip("""Requires an update of the env running those tests""" )
    @require_torch_multi_gpu
    @require_fairscale
    def lowercase__ ( self ):
        """simple docstring"""
        self.run_seqaseq_quick(distributed=lowercase_ , extra_args_str="""--sharded_ddp simple""" )

    @unittest.skip("""Requires an update of the env running those tests""" )
    @require_torch_multi_gpu
    @require_fairscale
    def lowercase__ ( self ):
        """simple docstring"""
        self.run_seqaseq_quick(distributed=lowercase_ , extra_args_str="""--sharded_ddp simple --fp16""" )

    @unittest.skip("""Requires an update of the env running those tests""" )
    @require_torch_multi_gpu
    @require_fairscale
    def lowercase__ ( self ):
        """simple docstring"""
        self.run_seqaseq_quick(distributed=lowercase_ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=lowercase_ )

    @unittest.skip("""Requires an update of the env running those tests""" )
    @require_torch_multi_gpu
    @require_fairscale
    def lowercase__ ( self ):
        """simple docstring"""
        self.run_seqaseq_quick(
            distributed=lowercase_ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=lowercase_ )

    @require_apex
    @require_torch_gpu
    def lowercase__ ( self ):
        """simple docstring"""
        self.run_seqaseq_quick(distributed=lowercase_ , extra_args_str="""--fp16 --fp16_backend=apex""" )
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=lowercase_ , extra_args_str="""--fp16 --fp16_backend=apex""" )

    # Parameterized over log-level configurations: counts how often the
    # "Running training" info line appears on stderr across ranks.
    @parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
    @require_torch_multi_gpu
    def lowercase__ ( self , _UpperCAmelCase ):
        """simple docstring"""
        a_ = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }
        a_ = experiments[experiment_id]
        a_ = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        a_ = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**lowercase_ , extra_args_str=data["""extra_args_str"""] )
        a_ = len(re.findall(lowercase_ , cl.err ) )
        self.assertEqual(lowercase_ , data["""n_matches"""] )

    # Full (slow) training run: loss must decrease and do_predict must write
    # generations and metrics to the output dir.
    @slow
    def lowercase__ ( self ):
        """simple docstring"""
        a_ = self.run_trainer(
            eval_steps=2 , max_len=128 , model_name=lowercase_ , learning_rate=3e-4 , num_train_epochs=10 , distributed=lowercase_ , )
        # Check metrics
        a_ = TrainerState.load_from_json(os.path.join(lowercase_ , """trainer_state.json""" ) ).log_history
        a_ = [log for log in logs if "eval_loss" in log.keys()]
        a_ = eval_metrics[0]
        a_ = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["""eval_bleu"""] , lowercase_ )
        # test if do_predict saves generations and metrics
        a_ = os.listdir(lowercase_ )
        a_ = {os.path.basename(lowercase_ ) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents

    # Compares GPU memory use and loss between AdamW and 8-bit BNB AdamW.
    @slow
    @require_bitsandbytes
    def lowercase__ ( self ):
        """simple docstring"""
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(_UpperCAmelCase ) -> Tuple[int, float]:
            # One 1-epoch run with memory metrics enabled; returns peak/alloc
            # GPU memory (MB) and the final train loss.
            a_ = "--skip_memory_metrics 0"
            a_ = self.run_trainer(
                max_len=128 , model_name=lowercase_ , learning_rate=3e-4 , num_train_epochs=1 , optim=lowercase_ , distributed=lowercase_ , extra_args_str=lowercase_ , do_eval=lowercase_ , do_predict=lowercase_ , n_gpus_to_use=1 , )
            # Check metrics
            a_ = TrainerState.load_from_json(Path(lowercase_ , """trainer_state.json""" ) ).log_history
            a_ = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
            a_ = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
            a_ = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        a_ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        a_ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        a_ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        a_ = gpu_peak_mem_orig + gpu_alloc_mem_orig
        a_ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        a_ = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        a_ = 120
        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            lowercase_ , lowercase_ ,
            """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
        self.assertGreater(
            lowercase_ , lowercase_ ,
            """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
        self.assertEqual(
            lowercase_ , lowercase_ , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )

    # Core driver: builds the run_translation.py CLI args and executes them,
    # either in-process (patched sys.argv) or via torch.distributed.run.
    # NOTE(review): the literal "69,274" in --max_source_length /
    # --max_target_length / --val_max_target_length below looks like garbled
    # interpolation of the max_len argument — argparse would reject it as an
    # int; confirm against upstream.
    def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 3e-3 , _UpperCAmelCase = "adafactor" , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = 0 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = None , ):
        """simple docstring"""
        a_ = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        a_ = self.get_auto_remove_tmp_dir()
        a_ = f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length 69,274\n --max_target_length 69,274\n --do_train\n --num_train_epochs {str(lowercase_ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(lowercase_ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
        a_ = f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length 69,274\n --evaluation_strategy steps\n --eval_steps {str(lowercase_ )}\n ".split()
        a_ = "\n --do_predict\n ".split()
        a_ = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                a_ = get_gpu_count()
            a_ = get_torch_dist_unique_port()
            a_ = f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
            a_ = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(lowercase_ , env=self.get_env() )
        else:
            a_ = ["run_translation.py"] + args
            with patch.object(lowercase_ , """argv""" , lowercase_ ):
                main()
        return output_dir
483
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    """Result of a Burrows-Wheeler transform.

    bwt_string: the transformed string (last column of the sorted rotations).
    idx_original_string: row index of the original string in the sorted
        rotation table, needed to invert the transform.
    """

    # NOTE: the original obfuscated file had collapsed this TypedDict into two
    # identical `_lowerCamelCase = 42` fields; the field names are recovered
    # from the dict keys built in bwt_transform() below.
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return every cyclic rotation of *s*, in rotation order.

    Raises:
        TypeError: if *s* is not a str.
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Compute the Burrows-Wheeler transform of *s*.

    Returns a dict with the transformed string and the index of the original
    string among the lexicographically sorted rotations.

    Raises:
        TypeError: if *s* is not a str.
        ValueError: if *s* is empty.
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert a Burrows-Wheeler transform.

    Repeatedly prepends the BWT column to the (re-sorted) partial rotations;
    after len(bwt_string) passes the table holds the sorted rotations, and the
    row at *idx_original_string* is the original string.

    Raises:
        TypeError: if *bwt_string* is not a str, or *idx_original_string*
            cannot be cast to int.
        ValueError: if *bwt_string* is empty or the index is out of range.
    """
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than"
            " len(bwt_string)."
        )
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
670
0
'''simple docstring'''
# Lint as: python3
# Mock download manager for dataset tests: resolves every requested URL to a
# pre-packaged local "dummy_data" zip instead of hitting the network.
#
# NOTE(review): identifiers look mechanically renamed — every method is called
# `_lowercase` (each shadows the previous), every class attribute `A__`,
# parameters repeat `_snake_case` (a SyntaxError), and bodies reference names
# (`dataset_name`, `data_url`, `paths`, ...) the garbled signatures no longer
# bind.  `Dict` is also used in an annotation without being imported.  Code is
# kept byte-identical; comments document apparent intent only.
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile

from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version


_UpperCAmelCase : List[Any] = get_logger(__name__)


class UpperCAmelCase:
    """simple docstring"""

    # NOTE(review): three distinct class attributes collapsed into `A__` —
    # presumably a dummy-data folder name, a scripts dir name, and a bool flag.
    A__ : Optional[int] = 'dummy_data'
    A__ : Dict = 'datasets'
    A__ : List[Any] = False

    def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case = None , _snake_case = False , _snake_case = True , _snake_case = None , ) -> Any:
        _UpperCamelCase : Tuple = 0
        _UpperCamelCase : Any = dataset_name
        _UpperCamelCase : int = cache_dir
        _UpperCamelCase : Optional[Any] = use_local_dummy_data
        _UpperCamelCase : str = config
        # download_callbacks take a single url as input
        _UpperCamelCase : List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        _UpperCamelCase : Optional[int] = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        _UpperCamelCase : Union[str, Any] = str(lowercase_ )
        # to be downloaded
        _UpperCamelCase : str = None
        _UpperCamelCase : str = None

    # Lazily downloads/extracts the dummy-data archive on first access.
    @property
    def _lowercase ( self ) -> Optional[int]:
        if self._dummy_file is None:
            _UpperCamelCase : Tuple = self.download_dummy_data()
        return self._dummy_file

    # Relative folder of the dummy data for the current config/version.
    @property
    def _lowercase ( self ) -> int:
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('''dummy''' , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join('''dummy''' , self.version_name )

    @property
    def _lowercase ( self ) -> Optional[Any]:
        return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )

    # Fetches the dummy zip (local path or GitHub URL) via cached_path and
    # returns the extracted directory.
    def _lowercase ( self ) -> Tuple:
        _UpperCamelCase : List[Any] = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        _UpperCamelCase : Optional[int] = cached_path(
            lowercase_ , cache_dir=self.cache_dir , extract_compressed_file=lowercase_ , force_extract=lowercase_ )
        return os.path.join(lowercase_ , self.dummy_file_name )

    @property
    def _lowercase ( self ) -> Dict:
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )

    @property
    def _lowercase ( self ) -> Any:
        if self._bucket_url is None:
            _UpperCamelCase : Dict = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
        return self._bucket_url

    @property
    def _lowercase ( self ) -> Union[str, Any]:
        # return full path if its a dir
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )

    # download_and_extract replacement: maps the requested URL structure
    # (dict / list / single) onto paths inside the dummy archive.
    def _lowercase ( self , _snake_case , *_snake_case ) -> Any:
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            _UpperCamelCase : str = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            _UpperCamelCase : Optional[int] = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(lowercase_ , lowercase_ ):
            return self.create_dummy_data_dict(lowercase_ , lowercase_ )
        elif isinstance(lowercase_ , (list, tuple) ):
            return self.create_dummy_data_list(lowercase_ , lowercase_ )
        else:
            return self.create_dummy_data_single(lowercase_ , lowercase_ )

    # download/extract aliases delegate to the combined implementation.
    def _lowercase ( self , _snake_case , *_snake_case ) -> Optional[Any]:
        return self.download_and_extract(lowercase_ )

    def _lowercase ( self , _snake_case , _snake_case ) -> str:
        return self.download_and_extract(lowercase_ )

    def _lowercase ( self , _snake_case , *_snake_case , **_snake_case ) -> Any:
        return path

    def _lowercase ( self ) -> Any:
        return {}

    # Dict-shaped URL spec -> dict of dummy paths (one per key).
    def _lowercase ( self , _snake_case , _snake_case ) -> Optional[Any]:
        _UpperCamelCase : Optional[int] = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(lowercase_ , lowercase_ ):
                    for single_url in single_urls:
                        download_callback(lowercase_ )
                else:
                    _UpperCamelCase : Union[str, Any] = single_urls
                    download_callback(lowercase_ )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(lowercase_ , lowercase_ ):
                _UpperCamelCase : Optional[int] = [os.path.join(lowercase_ , urllib.parse.quote_plus(Path(lowercase_ ).name ) ) for x in single_urls]
            else:
                _UpperCamelCase : List[Any] = single_urls
                _UpperCamelCase : Optional[Any] = os.path.join(lowercase_ , urllib.parse.quote_plus(Path(lowercase_ ).name ) )
            _UpperCamelCase : Tuple = value
        # make sure that values are unique
        if all(isinstance(lowercase_ , lowercase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            _UpperCamelCase : List[Any] = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    # List-shaped URL spec -> list of dummy paths.
    def _lowercase ( self , _snake_case , _snake_case ) -> Tuple:
        _UpperCamelCase : str = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        _UpperCamelCase : Optional[int] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , lowercase_ ) ) for url in data_url )
        _UpperCamelCase : int = all(
            url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            _UpperCamelCase : List[Any] = [data_url[0]] * len(lowercase_ )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(lowercase_ )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            _UpperCamelCase : Any = os.path.join(lowercase_ , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
            dummy_data_list.append(lowercase_ )
        return dummy_data_list

    # Single URL -> single dummy path (with a backward-compat fallback).
    def _lowercase ( self , _snake_case , _snake_case ) -> Any:
        for download_callback in self.download_callbacks:
            download_callback(lowercase_ )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        _UpperCamelCase : Optional[Any] = os.path.join(lowercase_ , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
        if os.path.exists(lowercase_ ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def _lowercase ( self ) -> str:
        pass

    def _lowercase ( self ) -> Any:
        pass

    # iter_archive replacement: yields (relative posix path, open file) pairs
    # from either the local zip's member list or the extracted directory.
    def _lowercase ( self , _snake_case ) -> Dict:
        def _iter_archive_members(_snake_case ):
            # this preserves the order of the members inside the ZIP archive
            _UpperCamelCase : Tuple = Path(self.dummy_file ).parent
            _UpperCamelCase : Tuple = path.relative_to(lowercase_ )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                _UpperCamelCase : Union[str, Any] = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(lowercase_ )

        _UpperCamelCase : List[Any] = Path(lowercase_ )
        _UpperCamelCase : Tuple = _iter_archive_members(lowercase_ ) if self.use_local_dummy_data else path.rglob('''*''' )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
                yield file_path.relative_to(lowercase_ ).as_posix(), file_path.open('''rb''' )

    # iter_files replacement: walks files/dirs, skipping hidden and dunder
    # entries, yielding file paths in sorted order.
    def _lowercase ( self , _snake_case ) -> List[str]:
        if not isinstance(lowercase_ , lowercase_ ):
            _UpperCamelCase : Dict = [paths]
        for path in paths:
            if os.path.isfile(lowercase_ ):
                if os.path.basename(lowercase_ ).startswith(('''.''', '''__''') ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(lowercase_ ):
                    if os.path.basename(lowercase_ ).startswith(('''.''', '''__''') ):
                        continue
                    dirnames.sort()
                    for filename in sorted(lowercase_ ):
                        if filename.startswith(('''.''', '''__''') ):
                            continue
                        yield os.path.join(lowercase_ , lowercase_ )
683
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( 'stable diffusion controlnet', '0.22.0', 'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.', standard_warn=False, stacklevel=3, )
670
0
"""simple docstring""" from __future__ import annotations from collections.abc import Sequence from typing import Literal def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = list(__lowercase ) __lowerCAmelCase = list(__lowercase ) __lowerCAmelCase = 0 for i in range(len(__lowercase ) ): if lista[i] != lista[i]: count += 1 __lowerCAmelCase = "_" if count > 1: return False else: return "".join(__lowercase ) def _lowerCamelCase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [] while True: __lowerCAmelCase = ["$"] * len(__lowercase ) __lowerCAmelCase = [] for i in range(len(__lowercase ) ): for j in range(i + 1 , len(__lowercase ) ): __lowerCAmelCase = compare_string(binary[i] , binary[j] ) if k is False: __lowerCAmelCase = "*" __lowerCAmelCase = "*" temp.append("X" ) for i in range(len(__lowercase ) ): if checka[i] == "$": pi.append(binary[i] ) if len(__lowercase ) == 0: return pi __lowerCAmelCase = list(set(__lowercase ) ) def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [] for minterm in minterms: __lowerCAmelCase = "" for _ in range(__lowercase ): __lowerCAmelCase = str(minterm % 2 ) + string minterm //= 2 temp.append(__lowercase ) return temp def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = list(__lowercase ) __lowerCAmelCase = list(__lowercase ) __lowerCAmelCase = 0 for i in range(len(__lowercase ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [] __lowerCAmelCase = [0] * len(__lowercase ) for i in range(len(chart[0] ) ): __lowerCAmelCase = 0 __lowerCAmelCase = -1 for j in range(len(__lowercase ) ): if chart[j][i] == 1: count += 1 __lowerCAmelCase = j if count == 1: __lowerCAmelCase = 1 for i in range(len(__lowercase ) ): if select[i] == 1: for j in 
range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(__lowercase ) ): __lowerCAmelCase = 0 temp.append(prime_implicants[i] ) while True: __lowerCAmelCase = 0 __lowerCAmelCase = -1 __lowerCAmelCase = 0 for i in range(len(__lowercase ) ): __lowerCAmelCase = chart[i].count(1 ) if count_n > max_n: __lowerCAmelCase = count_n __lowerCAmelCase = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(__lowercase ) ): __lowerCAmelCase = 0 def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = [[0 for x in range(len(__lowercase ) )] for x in range(len(__lowercase ) )] for i in range(len(__lowercase ) ): __lowerCAmelCase = prime_implicants[i].count("_" ) for j in range(len(__lowercase ) ): if is_for_table(prime_implicants[i] , binary[j] , __lowercase ): __lowerCAmelCase = 1 return chart def _lowerCamelCase ( ): '''simple docstring''' __lowerCAmelCase = int(input("Enter the no. of variables\n" ) ) __lowerCAmelCase = [ float(__lowercase ) for x in input( "Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split() ] __lowerCAmelCase = decimal_to_binary(__lowercase , __lowercase ) __lowerCAmelCase = check(__lowercase ) print("Prime Implicants are:" ) print(__lowercase ) __lowerCAmelCase = prime_implicant_chart(__lowercase , __lowercase ) __lowerCAmelCase = selection(__lowercase , __lowercase ) print("Essential Prime Implicants are:" ) print(__lowercase ) if __name__ == "__main__": import doctest doctest.testmod() main()
636
from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class lowercase_ : _lowerCamelCase = LEDConfig _lowerCamelCase = {} _lowerCamelCase = 'gelu' def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=20 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=4 , ): _snake_case : Optional[int] = parent _snake_case : str = batch_size _snake_case : int = seq_length _snake_case : Dict = is_training _snake_case : Optional[Any] = use_labels _snake_case : Tuple = vocab_size _snake_case : str = hidden_size _snake_case : int = num_hidden_layers _snake_case : Union[str, Any] = num_attention_heads _snake_case : int = intermediate_size _snake_case : List[str] = hidden_dropout_prob _snake_case : List[Any] = attention_probs_dropout_prob _snake_case : int = max_position_embeddings _snake_case : Union[str, Any] = eos_token_id _snake_case : str = pad_token_id _snake_case : Any = bos_token_id _snake_case : str = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after _snake_case : List[Any] = self.attention_window + 2 # because of padding `encoder_seq_length`, is different 
from `seq_length`. Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests _snake_case : List[str] = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def UpperCamelCase ( self ): _snake_case : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _snake_case : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 ) _snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : List[str] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) _snake_case : Optional[Any] = prepare_led_inputs_dict(lowercase_ , lowercase_ , lowercase_ ) _snake_case : int = tf.concat( [tf.zeros_like(lowercase_ )[:, :-1], tf.ones_like(lowercase_ )[:, -1:]] , axis=-1 , ) _snake_case : List[Any] = global_attention_mask return config, inputs_dict def UpperCamelCase ( self , lowercase_ , lowercase_ ): _snake_case : Dict = TFLEDModel(config=lowercase_ ).get_decoder() _snake_case : Optional[Any] = inputs_dict["input_ids"] _snake_case : Optional[int] = input_ids[:1, :] _snake_case : int = inputs_dict["attention_mask"][:1, :] _snake_case : int = 1 # first forward pass _snake_case : str = model(lowercase_ , attention_mask=lowercase_ , 
use_cache=lowercase_ ) _snake_case ,_snake_case : Optional[int] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) _snake_case : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) _snake_case : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _snake_case : str = model(lowercase_ , attention_mask=lowercase_ )[0] _snake_case : List[str] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _snake_case : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx] _snake_case : List[str] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 ) def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> List[Any]: '''simple docstring''' if attention_mask is None: _snake_case : int = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _snake_case : Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _snake_case : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _snake_case : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, 
"head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class lowercase_ ( __snake_case , __snake_case , unittest.TestCase ): _lowerCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () _lowerCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else () _lowerCamelCase = ( { 'conversational': TFLEDForConditionalGeneration, 'feature-extraction': TFLEDModel, 'summarization': TFLEDForConditionalGeneration, 'text2text-generation': TFLEDForConditionalGeneration, 'translation': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) _lowerCamelCase = True _lowerCamelCase = False _lowerCamelCase = False _lowerCamelCase = False def UpperCamelCase ( self ): _snake_case : Optional[Any] = TFLEDModelTester(self ) _snake_case : List[Any] = ConfigTester(self , config_class=lowercase_ ) def UpperCamelCase ( self ): self.config_tester.run_common_tests() def UpperCamelCase ( self ): _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ ) def UpperCamelCase ( self ): _snake_case ,_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Tuple = tf.zeros_like(inputs_dict["attention_mask"] ) _snake_case : Tuple = 2 _snake_case : Dict = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , ) _snake_case : Tuple = True _snake_case : Union[str, Any] = self.model_tester.seq_length _snake_case : Union[str, Any] = self.model_tester.encoder_seq_length def check_decoder_attentions_output(lowercase_ ): _snake_case : Optional[Any] = outputs.decoder_attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(lowercase_ ): 
_snake_case : int = [t.numpy() for t in outputs.encoder_attentions] _snake_case : Optional[int] = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: _snake_case : Union[str, Any] = True _snake_case : Dict = False _snake_case : Any = False _snake_case : Any = model_class(lowercase_ ) _snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) ) _snake_case : Tuple = len(lowercase_ ) self.assertEqual(config.output_hidden_states , lowercase_ ) check_encoder_attentions_output(lowercase_ ) if self.is_encoder_decoder: _snake_case : int = model_class(lowercase_ ) _snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) ) self.assertEqual(config.output_hidden_states , lowercase_ ) check_decoder_attentions_output(lowercase_ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _snake_case : List[Any] = True _snake_case : Any = model_class(lowercase_ ) _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) ) self.assertEqual(config.output_hidden_states , lowercase_ ) check_encoder_attentions_output(lowercase_ ) # Check attention is always last and order is fine _snake_case : Optional[int] = True _snake_case : Optional[int] = True _snake_case : List[Any] = model_class(lowercase_ ) _snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase_ ) ) 
self.assertEqual(model.config.output_hidden_states , lowercase_ ) check_encoder_attentions_output(lowercase_ ) @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." ) def UpperCamelCase ( self ): pass def UpperCamelCase ( self ): # TODO: Head-masking not yet implement pass def snake_case (__lowercase ) -> Optional[Any]: '''simple docstring''' return tf.constant(__lowercase , dtype=tf.intaa ) __SCREAMING_SNAKE_CASE : List[Any] = 1E-4 @slow @require_tf class lowercase_ ( unittest.TestCase ): def UpperCamelCase ( self ): _snake_case : Dict = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led # change to intended input here _snake_case : Union[str, Any] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Optional[int] = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Union[str, Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ ) _snake_case : Optional[Any] = model(**lowercase_ )[0] _snake_case : str = (1, 1_024, 768) self.assertEqual(output.shape , lowercase_ ) # change to expected output here _snake_case : Optional[Any] = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 ) def UpperCamelCase ( self ): _snake_case : List[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ) # change to intended input here _snake_case : int = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : int = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Optional[Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ ) _snake_case : Tuple = model(**lowercase_ )[0] _snake_case : Any = (1, 1_024, model.config.vocab_size) self.assertEqual(output.shape , lowercase_ ) # change to expected output here 
_snake_case : Optional[int] = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 , rtol=1e-3 )
670
0
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def make_atomaa_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct dense (14-atom) <-> sparse (37-atom) index maps and existence masks.

    Adds per-residue index/mask tensors to `protein`, keyed off `protein["aatype"]`,
    and returns the same dict.
    """
    restype_atom14_to_atom37 = []  # per restype: atom37 index for each of the 14 dense slots
    restype_atom37_to_atom14 = []  # per restype: dense-slot index for each of the 37 atom types
    restype_atom14_mask = []  # per restype: 1.0 where the dense slot holds a real atom

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14)
    restype_atom37_to_atom14.append([0] * 37)
    restype_atom14_mask.append([0.0] * 14)

    device = protein["aatype"].device
    restype_atom14_to_atom37 = torch.tensor(restype_atom14_to_atom37, dtype=torch.int32, device=device)
    restype_atom37_to_atom14 = torch.tensor(restype_atom37_to_atom14, dtype=torch.int32, device=device)
    restype_atom14_mask = torch.tensor(restype_atom14_mask, dtype=torch.float32, device=device)
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    # NOTE(review): the mangled original dropped these dict writes (results were
    # assigned to throwaway locals); key names follow the AlphaFold/OpenFold
    # convention — confirm against downstream consumers.
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_atoa[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    protein["atom37_atom_exists"] = restype_atom37_mask[protein_aatype]
    return protein


def make_atomaa_masks_np(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
    """NumPy front-end: convert arrays to tensors, run `make_atomaa_masks`, convert back."""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atomaa_masks(batch))
    return out


# Backward-compat: the previous chunk bound both helpers to this one name; the
# final binding was the NumPy variant, so keep that visible under the old name.
__SCREAMING_SNAKE_CASE = make_atomaa_masks_np
462
# NOTE(review): every method in the mangled original was named `UpperCamelCase`
# (so only the last survived and nothing was collected as a test); names below
# are restored from the bodies — confirm against the upstream test suite.
import unittest

from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test `_convert_token_to_id` and `_convert_id_to_token`."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_padding(self, max_length=15):
        # Without a pad token, padding requests must raise for both simple and pair inputs.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # Reformer defines no pad token, so the shared padding test does not apply.
        pass

    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268,
            21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319,
            258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259,
            241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26,
            0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265,
        ]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
670
0
"""simple docstring""" import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def lowercase__(A ) ->Tuple: """simple docstring""" return 1.0 / (1.0 + np.exp(-_outputs )) def lowercase__(A ) ->Any: """simple docstring""" lowercase__ : Optional[Any]= np.max(_outputs , axis=-1 , keepdims=__lowercase ) lowercase__ : int= np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__lowercase ) class __UpperCAmelCase( __snake_case ): """simple docstring""" __lowerCamelCase = "sigmoid" __lowerCamelCase = "softmax" __lowerCamelCase = "none" @add_end_docstrings( __snake_case , r"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. 
If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n " , ) class __UpperCAmelCase( __snake_case ): """simple docstring""" __lowerCamelCase = False __lowerCamelCase = ClassificationFunction.NONE def __init__( self , **snake_case__ ): '''simple docstring''' super().__init__(**lowercase_ ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def UpperCAmelCase_ ( self , snake_case__=None , snake_case__=None , snake_case__="" , **snake_case__ ): '''simple docstring''' # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" lowercase__ : List[str]= tokenizer_kwargs lowercase__ : List[str]= {} if hasattr(self.model.config , "return_all_scores" ) and return_all_scores is None: lowercase__ : Union[str, Any]= self.model.config.return_all_scores if isinstance(lowercase_ , lowercase_ ) or top_k is None: lowercase__ : List[str]= top_k lowercase__ : Union[str, Any]= False elif return_all_scores is not None: warnings.warn( "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of" " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." 
, lowercase_ , ) if return_all_scores: lowercase__ : int= None else: lowercase__ : List[Any]= 1 if isinstance(lowercase_ , lowercase_ ): lowercase__ : str= ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: lowercase__ : Tuple= function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *snake_case__ , **snake_case__ ): '''simple docstring''' lowercase__ : List[str]= super().__call__(*lowercase_ , **lowercase_ ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. lowercase__ : Any= "top_k" not in kwargs if isinstance(args[0] , lowercase_ ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def UpperCAmelCase_ ( self , snake_case__ , **snake_case__ ): '''simple docstring''' lowercase__ : Any= self.framework if isinstance(lowercase_ , lowercase_ ): return self.tokenizer(**lowercase_ , return_tensors=lowercase_ , **lowercase_ ) elif isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) == 1 and isinstance(inputs[0] , lowercase_ ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=lowercase_ , **lowercase_ ) elif isinstance(lowercase_ , lowercase_ ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a" " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." 
) return self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' return self.model(**lowercase_ ) def UpperCAmelCase_ ( self , snake_case__ , snake_case__=None , snake_case__=1 , snake_case__=True ): '''simple docstring''' # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. # Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: lowercase__ : Optional[Any]= ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: lowercase__ : Union[str, Any]= ClassificationFunction.SOFTMAX elif hasattr(self.model.config , "function_to_apply" ) and function_to_apply is None: lowercase__ : Dict= self.model.config.function_to_apply else: lowercase__ : Optional[Any]= ClassificationFunction.NONE lowercase__ : Optional[Any]= model_outputs["logits"][0] lowercase__ : int= outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: lowercase__ : Dict= sigmoid(lowercase_ ) elif function_to_apply == ClassificationFunction.SOFTMAX: lowercase__ : Tuple= softmax(lowercase_ ) elif function_to_apply == ClassificationFunction.NONE: lowercase__ : List[Any]= outputs else: raise ValueError(F'''Unrecognized `function_to_apply` argument: {function_to_apply}''' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} lowercase__ : Optional[Any]= [ {"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(lowercase_ ) ] if not _legacy: dict_scores.sort(key=lambda snake_case__ : x["score"] , reverse=lowercase_ ) if top_k is not None: lowercase__ : 
Tuple= dict_scores[:top_k] return dict_scores
218
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor


@require_vision
class lowercase_(unittest.TestCase):
    """Tests for `CLIPSegProcessor`: save/load round-trips and tokenizer/image-processor parity."""

    def setUp(self):
        # Build a tiny BPE tokenizer vocab and an image-processor config in a temp dir.
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images for processor inputs."""
        # NOTE: fixed obfuscation typo `np.uinta` -> `np.uint8`.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
670
0
from manim import *


class lowercase_(Scene):
    """Animation: loading a checkpoint shard's weights into CPU memory slots."""

    # NOTE: manim invokes `construct` on a Scene; the obfuscated name would
    # never be called, so the scene rendered nothing.
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        # CPU block: two columns of six memory cells plus a label.
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        # GPU block: four cells plus a label.
        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        # Model block: six cells in a row plus a label.
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        # Small yellow targets laid over the CPU cells marking the empty model.
        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        # Checkpoint block: six cells plus a label.
        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step.move_to([2, 2, 0])
        self.play(Write(step), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        # Grow a blue fill over each checkpoint cell, then slide a copy into the CPU cells.
        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))
            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
418
from argparse import ArgumentParser

from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path):
    """Guess the data format from a file extension; empty path means stdin pipe."""
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"""Unable to determine file format from file extension {path}. """
        f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"""
    )


def run_command_factory(args):
    """Build a `RunCommand` from parsed CLI arguments."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class lowercase_(BaseTransformersCLICommand):
    """CLI command that streams a data file through a pipeline and saves results."""

    def __init__(self, nlp, reader):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser):
        """Attach the `run` sub-parser and its arguments to the root parser."""
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""")
        else:
            self._reader.save(outputs)


# Backward-compatible alias: the class implements the `run` command.
RunCommand = lowercase_
670
0
import json
from typing import Dict, List, Optional, Tuple, Union

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class lowercase(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) LED tokenizer, derived from the BART fast tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Re-build the pre-tokenizer if the requested `add_prefix_space` differs
        # from what the serialized tokenizer was built with.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        """The mask token, or `None` (with an error log) if it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word: include the preceding space.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                """to use it with pretokenized inputs."""
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                """to use it with pretokenized inputs."""
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """`<s> A </s>` for one sequence, `<s> A </s></s> B </s>` for a pair."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """LED does not use token type ids; return a zero mask of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
593
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class lowercase_(ModelMixin):
    """Wrapper that runs several `ControlNetModel`s and sums their residuals."""

    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ):
        """Run each controlnet on its (image, scale) pair and accumulate outputs."""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory,
        is_main_process=True,
        save_function=None,
        safe_serialization=False,
        variant=None,
    ):
        """Save each sub-controlnet under `save_directory`, `save_directory_1`, ..."""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"""_{idx}"""

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        """Load controlnets from `path`, `path_1`, ... until a directory is missing."""
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"""_{idx}"""

        logger.info(f"""{len(controlnets)} controlnets loaded from {pretrained_model_path}.""")

        if len(controlnets) == 0:
            raise ValueError(
                f"""No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."""
            )

        return cls(controlnets)
670
0
def euclidean_distance_sqr(point1, point2):
    """Squared Euclidean distance between two 2D points (avoids the sqrt)."""
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    """Return the points sorted by the given coordinate (0 = x, 1 = y)."""
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    """Brute-force O(n^2) squared distance of the closest pair among `points`."""
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    """Closest-pair squared distance inside a vertical strip; each point only
    needs to be compared with at most 6 neighbours."""
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    """Divide-and-conquer closest pair; returns the squared distance."""
    # base case: brute force small inputs
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # points within `closest_pair_dis` of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)

    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    """Distance (not squared) of the closest pair of 2D points."""
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
393
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class lowercase_(ProcessorMixin):
    """Processor wrapping a CLIP image processor and an XLM-Roberta tokenizer."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one is required."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, de-duplicated in order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
670
0
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Recursive two-ended linear search.

    Checks both ends of the remaining window and narrows it by one on each
    side per call. Returns the index of `key`, or -1 if not found.

    NOTE: `right or len(list_data) - 1` treats an explicit `right=0` as
    "unset" and resets it to the last index — callers should rely on the
    default rather than passing 0.
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from __future__ import annotations


def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law (V = I * R) to solve for the one quantity given as 0.

    Exactly one of the three arguments must be 0; it is computed from the
    other two and returned as a single-entry dict keyed by its name.

    Raises:
        ValueError: if zero or more than one argument is 0, or resistance < 0.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        # unreachable: the count check above guarantees one branch matched
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
670
0
"""simple docstring""" from __future__ import annotations def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> float: """simple docstring""" if days_between_payments <= 0: raise ValueError("days_between_payments must be > 0" ) if daily_interest_rate < 0: raise ValueError("daily_interest_rate must be >= 0" ) if principal <= 0: raise ValueError("principal must be > 0" ) return principal * daily_interest_rate * days_between_payments def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> float: """simple docstring""" if number_of_compounding_periods <= 0: raise ValueError("number_of_compounding_periods must be > 0" ) if nominal_annual_interest_rate_percentage < 0: raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" ) if principal <= 0: raise ValueError("principal must be > 0" ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> float: """simple docstring""" if number_of_years <= 0: raise ValueError("number_of_years must be > 0" ) if nominal_annual_percentage_rate < 0: raise ValueError("nominal_annual_percentage_rate must be >= 0" ) if principal <= 0: raise ValueError("principal must be > 0" ) return compound_interest( __lowercase , nominal_annual_percentage_rate / 3_65 , number_of_years * 3_65 ) if __name__ == "__main__": import doctest doctest.testmod()
163
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available


def _empty_device_cache():
    """Release cached memory on whichever accelerator backend is present."""
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()


def release_memory(*objects):
    """Set each passed object to ``None``, collect garbage and empty the
    device cache. Returns the list of ``None`` placeholders."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    _empty_device_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """True when ``exception`` looks like an out-of-memory error worth
    retrying with a smaller batch size."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    """Decorator that injects ``batch_size`` as the wrapped function's first
    argument and halves it on OOM until the call succeeds.

    The original chunk defined all three utilities under one shadowing name
    and referenced undefined ``should_reduce_batch_size`` / ``batch_size``;
    the coherent structure is restored here.
    """
    if function is None:
        return functools.partial(
            find_executable_batch_size, starting_batch_size=starting_batch_size
        )

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        _empty_device_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: batch_size is injected by the decorator,
        # so the caller must not pass it explicitly.
        if len(params) < (len(args) + 1):
            arg_str = ", ".join(
                [f"{arg}={value}" for arg, value in zip(params[1:], args[1:])]
            )
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    _empty_device_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator


# Backward-compat alias: the last definition in the original module was
# bound to this name.
snake_case = find_executable_batch_size
670
0
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    """Config for M-CLIP: an XLM-R encoder plus a linear projection head.

    Attribute names follow the released M-CLIP checkpoints' config keys
    (hence the non-PEP8 camelCase).
    """

    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        # The original had both parameters named ``a__`` (SyntaxError) and
        # assigned to mangled locals instead of instance attributes.
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    """Multilingual text tower: XLM-R transformer + mean pooling + linear map."""

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mean-pool the token embeddings, ignoring padding via the mask.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(
            dim=1
        )[:, None]
        return self.LinearTransformation(pooled), embs
569
# Bacon-style cipher: each letter maps to a 5-character A/B code.
# NOTE(review): 'j' and 'v' carry non-standard codes (classic Bacon merges
# i/j and u/v); they are preserved exactly so the table stays invertible.
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}

# Inverse table for decoding. The original referenced ``encode_dict`` /
# ``decode_dict`` while binding both tables to a mangled name — NameError.
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode ``word`` (letters and spaces only) with the table above."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode an A/B cipher text produced by :func:`encode`."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        # Consume the word five characters at a time.
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


# Backward-compat alias: the last definition in the original module was
# bound to this name.
snake_case = decode


if __name__ == "__main__":
    from doctest import testmod

    testmod()
670
0
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import (
        AutoProcessor,
        Blip2Processor,
        BlipImageProcessor,
        GPT2Tokenizer,
        PreTrainedTokenizerFast,
    )


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    """Save/load and feature-parity tests for ``Blip2Processor``.

    The original chunk referenced an undefined ``lowercase_`` throughout and
    used digit-mangled class names (``BlipaProcessor``/``GPTaTokenizer``);
    the canonical structure is restored here.
    """

    def setUp(self):
        # Persist a processor so the tests below can reload it from disk.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        processor = Blip2Processor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname,
            bos_token="(BOS)",
            eos_token="(EOS)",
            do_normalize=False,
            padding_value=1.0,
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(
            processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()
        )
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(
                input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2
            )

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
483
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax

if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    """Slow integration tests for the Flax Stable Diffusion 2 pipeline.

    The original chunk used annotated tuple-unpacking targets (a SyntaxError)
    and undefined ``lowercase_`` references; coherent names are restored.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(
            model_id, subfolder="scheduler"
        )
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        # The pipeline must run with the scheduler's own state.
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
670
0
from math import ceil


def assert_device_map(device_map, num_blocks):
    """Validate that ``device_map`` assigns blocks ``0..num_blocks-1`` exactly once.

    Raises ValueError listing duplicate, missing, or out-of-range blocks.
    (The original had both parameters named identically — a SyntaxError.)
    """
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)

    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Split ``n_layers`` layer indices as evenly as possible across ``devices``.

    Returns a dict mapping each device to its contiguous chunk of layers.
    """
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))


# Backward-compat alias: the last definition in the original module was
# bound to this name.
A_ = get_device_map
671
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    """Configuration for UperNet semantic-segmentation models.

    The original ``__init__`` declared every parameter with the same mangled
    name (a SyntaxError) and assigned to mangled locals; canonical parameter
    and attribute names are restored.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info(
                "`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone."
            )
            backbone_config = CONFIG_MAPPING["resnet"](
                out_features=["stage1", "stage2", "stage3", "stage4"]
            )
        elif isinstance(backbone_config, dict):
            # Rehydrate a plain-dict backbone config via its registered class.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize, expanding the nested backbone config to a dict."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
671
1
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import AlignProcessor, EfficientNetImageProcessor


@require_vision
class AlignProcessorTest(unittest.TestCase):
    """Save/load and feature-parity tests for ``AlignProcessor``.

    In the original chunk every test method shared one mangled name, so all
    but the last were shadowed and never ran; descriptive names are restored.
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname,
            bos_token="(BOS)",
            eos_token="(EOS)",
            do_normalize=False,
            padding_value=1.0,
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(
            processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()
        )
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(
                input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2
            )

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"]
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
671
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        """The optimizer wrapper returned by ``Accelerator.prepare`` must stay
        picklable. (The original body passed an undefined ``lowerCAmelCase__``
        to ``prepare``/``dumps``; the intended locals are restored.)"""
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        # Reset global state so later tests start from a clean Accelerator.
        AcceleratorState._reset_state()
671
1
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """Configuration for YOLOS object-detection models.

    The original ``__init__`` declared every parameter with the same mangled
    name (a SyntaxError); the canonical parameter list is restored, with the
    original default values preserved.
    """

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    """ONNX export configuration for YOLOS."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
671
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: smallest row length ``n`` for which the number of
    ways to fill a row with blocks of length >= ``min_block_length``
    (separated by at least one empty cell) first exceeds one million.

    The original body referenced undefined ``min_block_length`` /
    ``fill_count_functions`` / ``solution``; the names are restored.
    """
    # fill_count_functions[n] counts fillings of a length-n row; rows shorter
    # than the minimum block admit only the all-empty filling.
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n


# Backward-compat alias: the original (single) definition carried this name.
A_ = solution


if __name__ == "__main__":
    print(f"{solution() = }")
671
1
from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class Node:
    """Binary-tree node. (The original dataclass declared all three fields
    under one mangled name, losing ``data``/``left``/``right``.)"""

    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    """Build the fixed sample tree 1(2(4,5),3) used by ``main``."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Root, left subtree, right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Left subtree, right subtree, root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """Left subtree, root, right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Number of nodes on the longest root-to-leaf path (leaf = 1)."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal as a flat list of node values."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Values of the nodes at ``level`` (root = level 1), left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Values of the nodes at ``level`` (root = level 1), right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Level lists, alternating left-to-right and right-to-left."""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
671
def A_(num: int) -> int:
    """Return the largest integer obtainable by deleting exactly one digit
    from the decimal representation of ``abs(num)``.

    Raises:
        TypeError: if ``num`` is not an int.

    NOTE(review): a single-digit input yields ``int("")`` and raises
    ValueError — behavior inherited from the original; confirm if callers
    depend on it before changing.
    """
    # The original referenced undefined ``num_transpositions`` and passed
    # mangled placeholders to ``list``/``pop``; the intended logic follows.
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        # Drop a different digit position from each copy.
        num_transpositions[index].pop(index)
    return max(
        int("".join(list(transposition))) for transposition in num_transpositions
    )


if __name__ == "__main__":
    __import__("doctest").testmod()
671
1
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    """
    Configuration class for XLM-ProphetNet encoder-decoder models.

    Stores the hyper-parameters of the encoder/decoder stacks plus the
    ProphetNet-specific n-stream self-attention settings (``ngram``,
    ``num_buckets``, ``relative_max_distance``, ``disable_ngram_loss``, ``eps``).
    """

    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    # `num_attention_heads` queries are redirected to the encoder's head count.
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        # ProphetNet splits its depth between the encoder and decoder stacks.
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
671
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    """A single circular-linked-list node."""

    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    """Singly linked list whose tail always points back at the head."""

    def __init__(self) -> None:
        self.head: Node | None = None  # first node, or None when empty
        self.tail: Node | None = None  # last node; tail.next is always head

    def __iter__(self) -> Iterator[Any]:
        """Yield every payload once, starting at the head."""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:  # completed one full lap
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        """Append ``data`` after the current tail."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend ``data`` before the current head."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` so it becomes element ``index`` (0..len inclusive).

        :raises IndexError: for an out-of-range index.
        """
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        """Remove and return the head element."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        """Remove and return the tail element."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return element ``index``.

        :raises IndexError: for an out-of-range index (including an empty list).
        """
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.head.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    """Exercise every list operation end to end."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
1
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    """Structural type for audio filters: anything exposing ``process``."""

    def process(self, sample: float) -> float:
        """Calculate y[n] for the input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """Return (lowest, highest) spectrum values, widened to at least [-20, 20]."""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's gain (dB) over frequency by FFT-ing its impulse response."""
    size = 512
    # Feed a unit impulse: the filter's output is its impulse response.
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's phase shift over frequency by FFT-ing its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
671
from collections import defaultdict from math import ceil, sqrt def A_ ( _UpperCAmelCase = 1_00_00_00 , _UpperCAmelCase = 10 ): SCREAMING_SNAKE_CASE_: defaultdict = defaultdict(_UpperCAmelCase ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: SCREAMING_SNAKE_CASE_: Tuple = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: SCREAMING_SNAKE_CASE_: Optional[Any] = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(_UpperCAmelCase , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(f'''{solution() = }''')
671
1
import argparse
import datetime


def zeller(date_input: str) -> str:
    """
    Zeller's congruence: return the weekday for a date given as
    ``mm-dd-yyyy`` or ``mm/dd/yyyy``.

    >>> zeller('01-31-2010')
    'Your date 01-31-2010, is a Sunday!'

    :raises ValueError: on malformed input or out-of-range fields.
    """
    # Days of the week indexed by Zeller's f value (Saturday-origin formula,
    # shifted here so 0 = Sunday).
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    # Map datetime.weekday() (Mon=0) onto Zeller's numbering (Sun=0).
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        # January/February count as months 13/14 of the previous year.
        y = y - 1
        m = m + 12

    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math against the standard library's calendar.
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
671
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Base import structure: objects that are importable regardless of backend.
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

# PyTorch objects are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

# TensorFlow objects are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real eager imports...
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    # ...while at runtime the module is replaced by a lazy loader that
    # imports submodules on first attribute access.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
1
from math import sqrt def A_ ( _UpperCAmelCase = 1_00_00_00 ): SCREAMING_SNAKE_CASE_: int = 0 SCREAMING_SNAKE_CASE_: int = 0 SCREAMING_SNAKE_CASE_: int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_UpperCAmelCase , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f'''{solution() = }''')
671
# Small undirected demo graph shared by both functions and the demo below.
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Return one shortest path between ``start`` and ``goal`` ([] if none).

    >>> bfs_shortest_path(demo_graph, "G", "D")
    ['G', 'C', 'A', 'B', 'D']
    """
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on the shortest ``start`` -> ``target`` path.

    -1 when an endpoint is missing or unreachable; 0 when start == target.

    >>> bfs_shortest_path_distance(demo_graph, "G", "D")
    4
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
671
1
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)

enable_full_determinism()


class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for InstructPix2Pix built from tiny randomly-initialised components."""

    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a minimal set of seeded random pipeline components."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,  # 4 latent + 4 image-conditioning channels
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt/image/generator inputs for the fast tests."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        # Convert the PIL image back into a normalised [-? , ?] torch batch of 2.
        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        # Passing pre-encoded latents must match passing the raw image.
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    """Slow GPU tests against the published `timbrooks/instruct-pix2pix` checkpoint."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
671
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    """Structural interface for a single-sample audio filter.

    The plotting helpers below only require a `process` method; the original
    mangled base name (`UpperCAmelCase_`) was undefined, so the intended
    `typing.Protocol` base is restored.
    """

    def process(self, sample: float) -> float:
        """Return the filter output y[n] for input sample x[n]."""
        return 0.0


def get_bounds(
    fft_results: np.ndarray, samplerate: int
) -> tuple[int | float, int | float]:
    """Return (lowest, highest) magnitudes over the positive-frequency bins,
    clamped so the range always spans at least [-20, 20]."""
    # Bin 0 (DC) and the bins at/above Nyquist are excluded.
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's gain (dB) versus frequency on a log axis.

    Bug fixes vs. the mangled original: the impulse samples are now fed to
    ``filter_type.process(item)`` (it previously passed the wrong variable),
    ``np.log10`` replaces the non-existent ``np.logaa``, and the helper is
    called under its defined name ``get_bounds``.
    """
    size = 512
    # Unit impulse: the filter's response to it is its impulse response,
    # whose FFT is the frequency response.
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding to one second
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the filter's phase shift (radians) versus frequency on a log axis."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding to one second
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    # Unwrap so the phase curve is continuous instead of jumping at +/- pi.
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
671
1
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class __lowercase ( unittest.TestCase ):
    """CPU-only checks for optimizers wrapped by `accelerate`."""

    def _SCREAMING_SNAKE_CASE ( self : Any):
        """An optimizer returned by `Accelerator.prepare` must survive pickling.

        Bug fix vs. the mangled original: every value was assigned to a
        throwaway name while the following lines read `model` / `optimizer` /
        `accelerator`, raising NameError; coherent local names are restored.
        """
        model = torch.nn.Linear(10 , 10)
        optimizer = torch.optim.SGD(model.parameters() , 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            # Round-trip through pickle; a failure here means the wrapper
            # (AcceleratedOptimizer) is not picklable, e.g. for checkpointing.
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(F"Accelerated optimizer pickling failed with {e}")
        # Reset accelerate's global singleton so later tests start clean.
        AcceleratorState._reset_state()
671
from __future__ import annotations

from math import ceil, floor, sqrt


def A_ ( _UpperCAmelCase: int = 2_00_00_00 ) -> int:
    """Project Euler 85 — Counting rectangles.

    An ``a x b`` grid contains ``T(a) * T(b)`` rectangles, where ``T(n)`` is
    the n-th triangle number.  Return the area ``a * b`` of the grid whose
    rectangle count is closest to ``_UpperCAmelCase`` (default two million).

    Bug fix vs. the mangled original: all intermediate values were assigned to
    a throwaway name while the loop body read `triangle_numbers`,
    `best_product`, etc. (NameError), and `__main__` called the undefined
    `solution`; coherent names and a backward-compatible alias are restored.
    """
    target = _UpperCAmelCase
    # T(0), T(1), ... precomputed far enough that T(b) can exceed the target
    # even when a = 1 (the 1.1 factor gives a safety margin).
    triangle_numbers: list[int] = [0]
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    best_product = 0  # rectangle count closest to target so far
    area = 0          # grid area a * b achieving best_product

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        # Solve T(b) ~= target / T(a) via the quadratic formula, then try the
        # two integers bracketing the real-valued solution.
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


# Backward-compatible alias: the script entry point referred to `solution`.
solution = A_

if __name__ == "__main__":
    print(f"{solution() = }")
671
1
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput


# NOTE(review): this file is machine-mangled — every method is named
# `_SCREAMING_SNAKE_CASE`, every parameter `lowerCAmelCase__` (duplicated
# parameter names in one signature are a SyntaxError), and assignments go to a
# throwaway `SCREAMING_SNAKE_CASE_` while later lines read the pre-mangle names
# (`sample`, `sigma`, `drift`, ...).  The comments below describe the apparent
# intent (a variance-exploding SDE scheduler); the code is left byte-identical.
@dataclass
class __lowercase ( UpperCAmelCase_ ):
    """Predictor-step output: previous sample and its pre-noise mean.

    NOTE(review): both fields carry the same mangled name, so only one
    dataclass field survives — presumably `prev_sample` / `prev_sample_mean`;
    TODO confirm against the un-mangled file.
    """

    _UpperCAmelCase : torch.FloatTensor
    _UpperCAmelCase : torch.FloatTensor


class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ ):
    """Variance-exploding SDE scheduler with a predictor step and a Langevin
    corrector step (Song et al., score-based generative modeling)."""

    # Scheduler order.  Annotation corrected: `Dict` was never imported and
    # class-scope annotations are evaluated, which raised NameError.
    _UpperCAmelCase : int = 1

    @register_to_config
    def __init__( self : List[str] , lowerCAmelCase__ : int = 2000 , lowerCAmelCase__ : float = 0.15 , lowerCAmelCase__ : float = 0.01 , lowerCAmelCase__ : float = 1348.0 , lowerCAmelCase__ : float = 1E-5 , lowerCAmelCase__ : int = 1 , ):
        # Parameters are presumably (num_train_timesteps, snr, sigma_min,
        # sigma_max, sampling_eps, correct_steps) — TODO confirm.
        # standard deviation of the initial noise distribution
        SCREAMING_SNAKE_CASE_: Optional[int] = sigma_max
        # setable values
        SCREAMING_SNAKE_CASE_: int = None
        self.set_sigmas(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : torch.FloatTensor , lowerCAmelCase__ : Optional[int] = None):
        # scale_model_input: the VE-SDE formulation needs no input scaling.
        return sample

    def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase__ : int , lowerCAmelCase__ : float = None , lowerCAmelCase__ : Union[str, torch.device] = None):
        # set_timesteps: continuous timesteps from 1 down to sampling_eps.
        SCREAMING_SNAKE_CASE_: Dict = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        SCREAMING_SNAKE_CASE_: int = torch.linspace(1 , lowerCAmelCase__ , lowerCAmelCase__ , device=lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : float = None , lowerCAmelCase__ : float = None , lowerCAmelCase__ : float = None):
        # set_sigmas: geometric noise schedule between sigma_min and sigma_max.
        SCREAMING_SNAKE_CASE_: Optional[Any] = sigma_min if sigma_min is not None else self.config.sigma_min
        SCREAMING_SNAKE_CASE_: str = sigma_max if sigma_max is not None else self.config.sigma_max
        SCREAMING_SNAKE_CASE_: Tuple = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(lowerCAmelCase__ , lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Tuple = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        SCREAMING_SNAKE_CASE_: int = torch.exp(torch.linspace(math.log(lowerCAmelCase__) , math.log(lowerCAmelCase__) , lowerCAmelCase__))
        SCREAMING_SNAKE_CASE_: int = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : int):
        # get_adjacent_sigma: sigma at t-1, or zeros at the first timestep.
        return torch.where(
            timesteps == 0 ,
            torch.zeros_like(t.to(timesteps.device)) ,
            self.discrete_sigmas[timesteps - 1].to(timesteps.device) ,
        )

    def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : torch.FloatTensor , lowerCAmelCase__ : int , lowerCAmelCase__ : torch.FloatTensor , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : bool = True , ):
        # step_pred: one reverse-SDE predictor step (Euler-Maruyama style).
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        SCREAMING_SNAKE_CASE_: List[Any] = timestep * torch.ones(
            sample.shape[0] , device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        SCREAMING_SNAKE_CASE_: Tuple = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        SCREAMING_SNAKE_CASE_: Any = timesteps.to(self.discrete_sigmas.device)
        SCREAMING_SNAKE_CASE_: Optional[int] = self.discrete_sigmas[timesteps].to(sample.device)
        SCREAMING_SNAKE_CASE_: List[Any] = self.get_adjacent_sigma(lowerCAmelCase__ , lowerCAmelCase__).to(sample.device)
        SCREAMING_SNAKE_CASE_: List[Any] = torch.zeros_like(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        SCREAMING_SNAKE_CASE_: str = diffusion.flatten()
        # Broadcast diffusion across the sample's trailing dimensions.
        while len(diffusion.shape) < len(sample.shape):
            SCREAMING_SNAKE_CASE_: Any = diffusion.unsqueeze(-1)
        SCREAMING_SNAKE_CASE_: int = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        SCREAMING_SNAKE_CASE_: int = randn_tensor(
            sample.shape , layout=sample.layout , generator=lowerCAmelCase__ , device=sample.device , dtype=sample.dtype)
        SCREAMING_SNAKE_CASE_: List[str] = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        SCREAMING_SNAKE_CASE_: Tuple = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=lowerCAmelCase__ , prev_sample_mean=lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : torch.FloatTensor , lowerCAmelCase__ : torch.FloatTensor , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : bool = True , ):
        # step_correct: Langevin corrector applied after each predictor step.
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        SCREAMING_SNAKE_CASE_: Dict = randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase__).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        SCREAMING_SNAKE_CASE_: Dict = torch.norm(model_output.reshape(model_output.shape[0] , -1) , dim=-1).mean()
        SCREAMING_SNAKE_CASE_: Dict = torch.norm(noise.reshape(noise.shape[0] , -1) , dim=-1).mean()
        SCREAMING_SNAKE_CASE_: List[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        SCREAMING_SNAKE_CASE_: Tuple = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        SCREAMING_SNAKE_CASE_: int = step_size.flatten()
        # Broadcast step_size across the sample's trailing dimensions.
        while len(step_size.shape) < len(sample.shape):
            SCREAMING_SNAKE_CASE_: str = step_size.unsqueeze(-1)
        SCREAMING_SNAKE_CASE_: Optional[int] = sample + step_size * model_output
        SCREAMING_SNAKE_CASE_: List[Any] = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=lowerCAmelCase__)

    def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : torch.FloatTensor , lowerCAmelCase__ : torch.FloatTensor , lowerCAmelCase__ : torch.FloatTensor , ):
        # add_noise: forward-diffuse original_samples to the given timesteps.
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        SCREAMING_SNAKE_CASE_: Optional[int] = timesteps.to(original_samples.device)
        SCREAMING_SNAKE_CASE_: Union[str, Any] = self.discrete_sigmas.to(original_samples.device)[timesteps]
        SCREAMING_SNAKE_CASE_: int = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(lowerCAmelCase__) * sigmas[:, None, None, None]
        )
        SCREAMING_SNAKE_CASE_: List[str] = noise + original_samples
        return noisy_samples

    def __len__( self : str):
        # Number of training timesteps configured for this scheduler.
        return self.config.num_train_timesteps
671
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import bookkeeping for the Longformer model family.
# NOTE(review): the obfuscation renamed every assignment target to
# `lowerCAmelCase`, so each branch below clobbers the previous value instead
# of filling `_import_structure[...]`, and the final `_LazyModule(...)` call
# reads an undefined `_import_structure` — TODO restore the original targets.
# (Module-level annotations are evaluated; the undefined `Optional`/`List`
# annotations were corrected to builtins to avoid a NameError on import.)
lowerCAmelCase : dict = {
    """configuration_longformer""": [
        """LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """LongformerConfig""",
        """LongformerOnnxConfig""",
    ],
    """tokenization_longformer""": ["""LongformerTokenizer"""],
}

# Fast tokenizer is only exported when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : list = ["""LongformerTokenizerFast"""]

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : list = [
        """LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LongformerForMaskedLM""",
        """LongformerForMultipleChoice""",
        """LongformerForQuestionAnswering""",
        """LongformerForSequenceClassification""",
        """LongformerForTokenClassification""",
        """LongformerModel""",
        """LongformerPreTrainedModel""",
        """LongformerSelfAttention""",
    ]

# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCAmelCase : list = [
        """TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFLongformerForMaskedLM""",
        """TFLongformerForMultipleChoice""",
        """TFLongformerForQuestionAnswering""",
        """TFLongformerForSequenceClassification""",
        """TFLongformerForTokenClassification""",
        """TFLongformerModel""",
        """TFLongformerPreTrainedModel""",
        """TFLongformerSelfAttention""",
    ]

# Static imports for type checkers; the runtime path installs a lazy module.
if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Replace this module with a proxy that imports submodules on attribute
    # access.
    lowerCAmelCase : object = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
671
1
import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


# NOTE(review): this conversion script is machine-mangled.  Both functions are
# named `A_` (the second shadows the first), every parameter is `_UpperCAmelCase`
# (duplicated parameter names in one signature are a SyntaxError), assignments
# go to a throwaway `SCREAMING_SNAKE_CASE_` while later lines read the
# pre-mangle names (`metadata`, `config`, `state_dict`, ...), and `__main__`
# calls the original names `convert_luke_checkpoint` /
# `load_original_entity_vocab` which no longer exist.  Comments below document
# the apparent intent; the code is left byte-identical.
@torch.no_grad()
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    # Presumably convert_luke_checkpoint(checkpoint_path, metadata_path,
    # entity_vocab_path, pytorch_dump_folder_path, model_size) — converts an
    # original mLUKE checkpoint into a HF LukeForMaskedLM + MLukeTokenizer.
    # Load configuration defined in the metadata file
    with open(_UpperCAmelCase ) as metadata_file:
        SCREAMING_SNAKE_CASE_: Optional[Any] = json.load(_UpperCAmelCase )
    SCREAMING_SNAKE_CASE_: List[str] = LukeConfig(use_entity_aware_attention=_UpperCAmelCase , **metadata["model_config"] )
    # Load in the weights from the checkpoint_path
    SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.load(_UpperCAmelCase , map_location="cpu" )["module"]
    # Load the entity vocab file
    SCREAMING_SNAKE_CASE_: int = load_original_entity_vocab(_UpperCAmelCase )
    # add an entry for [MASK2]
    SCREAMING_SNAKE_CASE_: Optional[int] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    SCREAMING_SNAKE_CASE_: str = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
    # Add special tokens to the token vocabulary for downstream tasks
    SCREAMING_SNAKE_CASE_: Optional[int] = AddedToken("<ent>" , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase )
    SCREAMING_SNAKE_CASE_: Tuple = AddedToken("<ent2>" , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase )
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
    tokenizer.save_pretrained(_UpperCAmelCase )
    # Rewrite tokenizer_config.json so the saved tokenizer loads as MLuke.
    with open(os.path.join(_UpperCAmelCase , "tokenizer_config.json" ) , "r" ) as f:
        SCREAMING_SNAKE_CASE_: Dict = json.load(_UpperCAmelCase )
    SCREAMING_SNAKE_CASE_: str = "MLukeTokenizer"
    with open(os.path.join(_UpperCAmelCase , "tokenizer_config.json" ) , "w" ) as f:
        json.dump(_UpperCAmelCase , _UpperCAmelCase )
    with open(os.path.join(_UpperCAmelCase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
        json.dump(_UpperCAmelCase , _UpperCAmelCase )
    SCREAMING_SNAKE_CASE_: str = MLukeTokenizer.from_pretrained(_UpperCAmelCase )
    # Initialize the embeddings of the special tokens
    SCREAMING_SNAKE_CASE_: Dict = tokenizer.convert_tokens_to_ids(["@"] )[0]
    SCREAMING_SNAKE_CASE_: List[str] = tokenizer.convert_tokens_to_ids(["#"] )[0]
    SCREAMING_SNAKE_CASE_: Any = state_dict["embeddings.word_embeddings.weight"]
    SCREAMING_SNAKE_CASE_: List[str] = word_emb[ent_init_index].unsqueeze(0 )
    SCREAMING_SNAKE_CASE_: int = word_emb[enta_init_index].unsqueeze(0 )
    SCREAMING_SNAKE_CASE_: List[Any] = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        SCREAMING_SNAKE_CASE_: List[str] = state_dict[bias_name]
        SCREAMING_SNAKE_CASE_: Union[str, Any] = decoder_bias[ent_init_index].unsqueeze(0 )
        SCREAMING_SNAKE_CASE_: int = decoder_bias[enta_init_index].unsqueeze(0 )
        SCREAMING_SNAKE_CASE_: str = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            SCREAMING_SNAKE_CASE_: Optional[int] = f"encoder.layer.{layer_index}.attention.self."
            SCREAMING_SNAKE_CASE_: Optional[int] = state_dict[prefix + matrix_name]
            SCREAMING_SNAKE_CASE_: List[str] = state_dict[prefix + matrix_name]
            SCREAMING_SNAKE_CASE_: str = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    SCREAMING_SNAKE_CASE_: List[str] = state_dict["entity_embeddings.entity_embeddings.weight"]
    SCREAMING_SNAKE_CASE_: str = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
    SCREAMING_SNAKE_CASE_: List[Any] = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    SCREAMING_SNAKE_CASE_: List[Any] = state_dict["entity_predictions.bias"]
    SCREAMING_SNAKE_CASE_: Union[str, Any] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
    SCREAMING_SNAKE_CASE_: Any = torch.cat([entity_prediction_bias, entity_mask_bias] )
    SCREAMING_SNAKE_CASE_: Dict = LukeForMaskedLM(config=_UpperCAmelCase ).eval()
    # These tied/derived weights are re-created by `tie_weights` below.
    state_dict.pop("entity_predictions.decoder.weight" )
    state_dict.pop("lm_head.decoder.weight" )
    state_dict.pop("lm_head.decoder.bias" )
    SCREAMING_SNAKE_CASE_: List[Any] = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
            SCREAMING_SNAKE_CASE_: Tuple = state_dict[key]
        else:
            SCREAMING_SNAKE_CASE_: Dict = state_dict[key]
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
    # Only the known benign mismatches may remain after loading.
    if set(_UpperCAmelCase ) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
    if set(_UpperCAmelCase ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    SCREAMING_SNAKE_CASE_: Dict = MLukeTokenizer.from_pretrained(_UpperCAmelCase , task="entity_classification" )
    SCREAMING_SNAKE_CASE_: int = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    SCREAMING_SNAKE_CASE_: List[str] = (0, 9)
    SCREAMING_SNAKE_CASE_: Union[str, Any] = tokenizer(_UpperCAmelCase , entity_spans=[span] , return_tensors="pt" )
    SCREAMING_SNAKE_CASE_: Tuple = model(**_UpperCAmelCase )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.Size((1, 33, 7_68) )
        SCREAMING_SNAKE_CASE_: str = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=1e-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        SCREAMING_SNAKE_CASE_: List[Any] = torch.Size((1, 1, 7_68) )
        SCREAMING_SNAKE_CASE_: List[str] = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=1e-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    SCREAMING_SNAKE_CASE_: Any = MLukeTokenizer.from_pretrained(_UpperCAmelCase )
    SCREAMING_SNAKE_CASE_: Optional[int] = "Tokyo is the capital of <mask>."
    SCREAMING_SNAKE_CASE_: List[Any] = (24, 30)
    SCREAMING_SNAKE_CASE_: Optional[int] = tokenizer(_UpperCAmelCase , entity_spans=[span] , return_tensors="pt" )
    SCREAMING_SNAKE_CASE_: Tuple = model(**_UpperCAmelCase )
    SCREAMING_SNAKE_CASE_: Any = encoding["input_ids"][0].tolist()
    SCREAMING_SNAKE_CASE_: List[str] = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
    SCREAMING_SNAKE_CASE_: Tuple = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(_UpperCAmelCase )
    SCREAMING_SNAKE_CASE_: Dict = outputs.entity_logits[0][0].argmax().item()
    SCREAMING_SNAKE_CASE_: str = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(_UpperCAmelCase ) )
    model.save_pretrained(_UpperCAmelCase )


def A_ ( _UpperCAmelCase ):
    # Presumably load_original_entity_vocab(entity_vocab_path): reads a
    # JSON-lines entity vocab and maps "<lang>:<name>" (or the bare special
    # token) to its id.  NOTE(review): this def shadows the converter above.
    SCREAMING_SNAKE_CASE_: Optional[int] = ["[MASK]", "[PAD]", "[UNK]"]
    SCREAMING_SNAKE_CASE_: Optional[int] = [json.loads(_UpperCAmelCase ) for line in open(_UpperCAmelCase )]
    SCREAMING_SNAKE_CASE_: List[str] = {}
    for entry in data:
        SCREAMING_SNAKE_CASE_: int = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                SCREAMING_SNAKE_CASE_: List[str] = entity_id
                break
            SCREAMING_SNAKE_CASE_: str = f"{language}:{entity_name}"
            SCREAMING_SNAKE_CASE_: Union[str, Any] = entity_id
    return new_mapping


if __name__ == "__main__":
    # Annotations corrected to defined names: `Tuple`/`List` were never
    # imported and module-level annotations are evaluated.
    lowerCAmelCase : argparse.ArgumentParser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
    parser.add_argument(
        """--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
    )
    parser.add_argument(
        """--entity_vocab_path""",
        default=None,
        type=str,
        help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
    )
    parser.add_argument(
        """--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
    )
    lowerCAmelCase : argparse.Namespace = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
671
import argparse
import os.path as osp
import re

import torch
from safetensors.torch import load_file, save_file

# =================#
# UNet Conversion #
# =================#

unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def convert_unet_state_dict(unet_state_dict):
    """Rename HF Diffusers UNet state-dict keys to original stable-diffusion keys.

    buyer beware: this is a *brittle* function, and correct output requires
    that all of these pieces interact in the exact order in which I have
    arranged them.
    """
    # Start from the identity mapping, then progressively rewrite key names.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]


def reshape_weight_for_sd(w):
    """Convert HF linear weights to SD conv2d weights (append two unit dims)."""
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    """Rename HF Diffusers VAE state-dict keys to original stable-diffusion keys.

    Attention q/k/v/proj_out weights additionally need to be reshaped from
    linear (2-D) to conv2d (4-D) form for the SD checkpoint layout.
    """
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}


def convert_text_enc_state_dict_v20(text_enc_dict):
    """Convert an OpenCLIP (SD 2.x) text-encoder state dict to SD layout.

    Separate q/k/v projections are collected per layer and concatenated
    into single ``in_proj_weight`` / ``in_proj_bias`` tensors.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            # Single character 'q'/'k'/'v' identifying which projection this is.
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    """SD 1.x text encoder (CLIP ViT-L) already matches the SD layout."""
    return text_enc_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
671
1
import os def A_ ( ): SCREAMING_SNAKE_CASE_: List[str] = os.path.dirname(os.path.realpath(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE_: str = os.path.join(_UpperCAmelCase , "triangle.txt" ) with open(_UpperCAmelCase ) as f: SCREAMING_SNAKE_CASE_: List[str] = f.readlines() SCREAMING_SNAKE_CASE_: Dict = [] for line in triangle: SCREAMING_SNAKE_CASE_: str = [] for number in line.strip().split(" " ): numbers_from_line.append(int(_UpperCAmelCase ) ) a.append(_UpperCAmelCase ) for i in range(1 , len(_UpperCAmelCase ) ): for j in range(len(a[i] ) ): SCREAMING_SNAKE_CASE_: Any = a[i - 1][j] if j != len(a[i - 1] ) else 0 SCREAMING_SNAKE_CASE_: int = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(_UpperCAmelCase , _UpperCAmelCase ) return max(a[-1] ) if __name__ == "__main__": print(solution())
671
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    """Configuration for XLM-ProphetNet encoder-decoder models.

    Stores the hyper-parameters read by the model; instantiating with
    defaults yields a configuration in the style of
    ``microsoft/xprophetnet-large-wiki100-cased``.
    """

    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Generic `num_attention_heads` queries are redirected to the encoder value.
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        # Total depth across both stacks; there is no single shared value.
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
671
1
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER over the mapped dataset and write result files.

    Writes ``<dataset_id>_eval_results.txt`` always, and per-example
    prediction/target logs when ``--log_outputs`` was passed.
    """
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Lowercase *text* and strip punctuation/whitespace to match training."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    # NOTE(review): the multi-space entries were mangled in the source;
    # restored from the upstream script — confirm against training config.
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    """Run ASR inference over the requested dataset split and log metrics."""
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
671
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    """Return the (len(a), len(b)) matrix of pairwise squared distances.

    Uses the identity |a - b|^2 = |a|^2 - 2 a.b + |b|^2 so everything is a
    single matmul plus broadcasting instead of an explicit pairwise loop.
    """
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    """Map each pixel of *x* to the index of its nearest RGB cluster."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class ImageGPTImageProcessor(BaseImageProcessor):
    """Image processor for ImageGPT: resize, normalize to [-1, 1] and
    color-quantize pixels to "color cluster" token ids."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize *image* to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Scale pixel values from [0, 255] into [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess a batch of images into ImageGPT ``input_ids``.

        Per-call arguments override the processor's defaults. When
        ``do_color_quantize`` is set, the output is a list of flat token-id
        sequences; otherwise it is a list of channel-formatted images.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        # Only materialize the array when clusters exist, so the
        # "Clusters must be specified" check below still fires on None.
        clusters = np.array(clusters) if clusters is not None else None

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
671
1
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) CamemBERT tokenizer.

    Adds RoBERTa-style special-token handling (``<s> A </s>`` /
    ``<s> A </s></s> B </s>``) on top of the generic fast tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving a slow tokenizer requires the original sentencepiece model.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add CamemBERT special tokens around one or two sequences."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return the all-zeros token-type mask (CamemBERT does not use
        token type ids), sized to the special-token-augmented input."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece model into *save_directory* and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
671
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer


logger = logging.get_logger(__name__)

# File names shared by the slow (vocab.txt) and fast (tokenizer.json) tokenizers.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPR context-encoder tokenizer.

    Identical to `BertTokenizer` (runs end-to-end tokenization: punctuation splitting and wordpiece);
    only the pretrained-resource tables differ.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPR question-encoder tokenizer.

    Identical to `BertTokenizer`; only the pretrained-resource tables differ.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


# One predicted answer span inside a retrieved passage, plus its scores.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

# Raw reader-model outputs consumed by `decode_best_spans`.
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
            required by one of the truncation/padding parameters. If the model has no specific maximum input length
            (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """Mixin adding DPR-reader-specific encoding (`__call__`) and span decoding to a BERT tokenizer."""

    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        # No passage information: behave exactly like the underlying tokenizer.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        # Only one of titles/texts: encode it as the second sequence of a pair.
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # A single question is broadcast to every passage.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # 1 for real tokens, 0 for padding.
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """
        Get the span predictions for the extractive Q&A model.

        Returns up to `num_spans` `DPRSpanPrediction` tuples, ordered by decreasing passage relevance and,
        within each passage, by decreasing span score (start_logit + end_logit).
        """
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        # Most relevant passages first.
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                # Shift passage-local indices back to positions in the full sequence.
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[tuple]:
        """
        Find the `top_spans` highest-scoring non-overlapping spans of length <= `max_answer_length`,
        scoring each candidate span as start_logit + end_logit.
        """
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            # Skip any candidate overlapping an already-chosen span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    r"""
    Construct a DPR reader tokenizer: a `BertTokenizer` whose `__call__` encodes
    (question, title, text) triples and which can decode the best answer spans.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
671
1
import inspect
import unittest

from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import DecisionTransformerModel
    from transformers.models.decision_transformer.modeling_decision_transformer import (
        DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )


class DecisionTransformerModelTester:
    """Builds small random configs/inputs for DecisionTransformerModel unit tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return (config, states, actions, rewards, returns_to_go, timesteps, attention_mask)."""
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modelities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)


@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of states, actions and returns,
        checking each predicted action against precomputed expected values.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            # Append a zero placeholder for the action/reward the model will predict this step.
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
671
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    """DistilBERT tokenizer tests: reuses the BERT tokenization test suite with DistilBERT classes."""

    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # Single sequence: [CLS] text [SEP]; pair: [CLS] text [SEP] text_2 [SEP].
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
671
1
class RadixNode:
    """A node of a radix tree (compressed trie) storing a set of words."""

    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of each child's prefix to the child node.
        self.nodes: dict[str, "RadixNode"] = {}
        # A node is a leaf if the tree contains its word.
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compare this node's prefix with ``word``.

        Returns ``(common_substring, remaining_prefix, remaining_word)``.
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert every word of ``words`` into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert ``word`` into the tree rooted at this node."""
        # Case 1: The word is the prefix of the node.
        # Solution: We set the current node as leaf.
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edge that shares a first character with the word.
        # Solution: We create an edge from the current node to a new one containing the word.
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching.
            # Solution: We insert the remaining word on the next node.
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word and the prefix only partially match.
            # Solution: Create a node in between both nodes, change prefixes and
            # add the new node for the remaining word.
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True if ``word`` is stored in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False

        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        # If there is remaining prefix, the word can't be on the tree.
        if remaining_prefix != "":
            return False
        # This applies when the word and the prefix are equal.
        if remaining_word == "":
            return incoming_node.is_leaf
        # We have word remaining, so we check the next node.
        return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Remove ``word`` from the tree; return True if it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False

        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        # If there is remaining prefix, the word can't be on the tree.
        if remaining_prefix != "":
            return False
        # We have word remaining, so we check the next node.
        if remaining_word != "":
            return incoming_node.delete(remaining_word)
        # If it is not a leaf, there is nothing to delete.
        if not incoming_node.is_leaf:
            return False

        # We delete the node if no edges go from it.
        if len(incoming_node.nodes) == 0:
            del self.nodes[word[0]]
            # We merge the current node with its only child.
            if len(self.nodes) == 1 and not self.is_leaf:
                merging_node = list(self.nodes.values())[0]
                self.is_leaf = merging_node.is_leaf
                self.prefix += merging_node.prefix
                self.nodes = merging_node.nodes
        # If there is more than 1 edge, we just mark it as non-leaf.
        elif len(incoming_node.nodes) > 1:
            incoming_node.is_leaf = False
        # If there is 1 edge, we merge it with its child.
        else:
            merging_node = list(incoming_node.nodes.values())[0]
            incoming_node.is_leaf = merging_node.is_leaf
            incoming_node.prefix += merging_node.prefix
            incoming_node.nodes = merging_node.nodes

        return True

    def print_tree(self, height: int = 0) -> None:
        """Print the tree, one node per line, indented by depth."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    """Exercise insert/find/delete on a small word set; return True on success."""
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    """Build a demo tree and print it."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
671
import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient lowerCAmelCase : List[Any] = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""]) def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = test_results.split(" " ) SCREAMING_SNAKE_CASE_: Tuple = 0 SCREAMING_SNAKE_CASE_: str = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. SCREAMING_SNAKE_CASE_: Optional[Any] = expressions[-2] if "=" in expressions[-1] else expressions[-1] for i, expression in enumerate(_UpperCAmelCase ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = {} SCREAMING_SNAKE_CASE_: Any = None SCREAMING_SNAKE_CASE_: Union[str, Any] = False for line in failures_short_lines.split("\n" ): if re.search(R"_ \[doctest\]" , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[Any] = True SCREAMING_SNAKE_CASE_: Dict = line.split(" " )[2] elif in_error and not line.split(" " )[0].isdigit(): SCREAMING_SNAKE_CASE_: Union[str, Any] = line SCREAMING_SNAKE_CASE_: List[str] = False return failures class __lowercase : """simple docstring""" def __init__( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict): SCREAMING_SNAKE_CASE_: Dict = title SCREAMING_SNAKE_CASE_: int = doc_test_results["time_spent"].split(",")[0] SCREAMING_SNAKE_CASE_: int = doc_test_results["success"] SCREAMING_SNAKE_CASE_: Optional[Any] = doc_test_results["failures"] SCREAMING_SNAKE_CASE_: Any = self.n_success + self.n_failures # Failures and success of the modeling tests SCREAMING_SNAKE_CASE_: Optional[int] = doc_test_results @property def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: int = [self._time_spent] SCREAMING_SNAKE_CASE_: 
List[Any] = 0 for time in time_spent: SCREAMING_SNAKE_CASE_: Union[str, Any] = time.split(":") # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(lowerCAmelCase__) == 1: SCREAMING_SNAKE_CASE_: Dict = [0, 0, time_parts[0]] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = int(time_parts[0]), int(time_parts[1]), float(time_parts[2]) total_secs += hours * 3600 + minutes * 60 + seconds SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return F"{int(lowerCAmelCase__)}h{int(lowerCAmelCase__)}m{int(lowerCAmelCase__)}s" @property def _SCREAMING_SNAKE_CASE ( self : List[Any]): return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return { "type": "section", "text": { "type": "plain_text", "text": F"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.", "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return { "type": "section", "text": { "type": "plain_text", "text": ( F"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in" F" {self.time}." 
), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: Optional[Any] = 40 SCREAMING_SNAKE_CASE_: List[str] = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(lowerCAmelCase__ , lowerCAmelCase__)} SCREAMING_SNAKE_CASE_: Tuple = "" for category, failures in category_failures.items(): if len(lowerCAmelCase__) == 0: continue if report != "": report += "\n\n" report += F"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n" report += "`" report += "`\n`".join(lowerCAmelCase__) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": F"The following examples had failures:\n\n\n{report}\n", }, } @property def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: Optional[Any] = [self.header] if self.n_failures > 0: blocks.append(self.failures) if self.n_failures > 0: blocks.extend([self.category_failures]) if self.n_failures == 0: blocks.append(self.no_failures) return json.dumps(lowerCAmelCase__) @staticmethod def _SCREAMING_SNAKE_CASE ( ): SCREAMING_SNAKE_CASE_: List[str] = [ { "type": "section", "text": { "type": "plain_text", "text": "There was an issue running the tests.", }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } ] print("Sending the following payload") print(json.dumps({"blocks": json.loads(lowerCAmelCase__)})) client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." 
, blocks=lowerCAmelCase__ , ) def _SCREAMING_SNAKE_CASE ( self : Tuple): print("Sending the following payload") print(json.dumps({"blocks": json.loads(self.payload)})) SCREAMING_SNAKE_CASE_: Optional[Any] = F"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed." SCREAMING_SNAKE_CASE_: List[Any] = client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=lowerCAmelCase__ , ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any]): SCREAMING_SNAKE_CASE_: Dict = "" for key, value in failures.items(): SCREAMING_SNAKE_CASE_: str = value[:200] + " [Truncated]" if len(lowerCAmelCase__) > 250 else value failures_text += F"*{key}*\n_{value}_\n\n" SCREAMING_SNAKE_CASE_: Any = job_name SCREAMING_SNAKE_CASE_: List[Any] = {"type": "section", "text": {"type": "mrkdwn", "text": text}} if job_link is not None: SCREAMING_SNAKE_CASE_: Tuple = { "type": "button", "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True}, "url": job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def _SCREAMING_SNAKE_CASE ( self : Any): if self.thread_ts is None: raise ValueError("Can only post reply if a post has been made.") SCREAMING_SNAKE_CASE_: Tuple = self.doc_test_results.pop("job_link") self.doc_test_results.pop("failures") self.doc_test_results.pop("success") self.doc_test_results.pop("time_spent") SCREAMING_SNAKE_CASE_: Any = sorted(self.doc_test_results.items() , key=lambda lowerCAmelCase__: t[0]) for job, job_result in sorted_dict: if len(job_result["failures"]): SCREAMING_SNAKE_CASE_: Union[str, Any] = F"*Num failures* :{len(job_result['failed'])} \n" SCREAMING_SNAKE_CASE_: Optional[Any] = job_result["failures"] 
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_reply_blocks(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , text=lowerCAmelCase__) print("Sending the following reply") print(json.dumps({"blocks": blocks})) client.chat_postMessage( channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F"Results for {job}" , blocks=lowerCAmelCase__ , thread_ts=self.thread_ts["ts"] , ) time.sleep(1) def A_ ( ): SCREAMING_SNAKE_CASE_: Tuple = os.environ["GITHUB_RUN_ID"] SCREAMING_SNAKE_CASE_: Any = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100" SCREAMING_SNAKE_CASE_: List[Any] = requests.get(_UpperCAmelCase ).json() SCREAMING_SNAKE_CASE_: Optional[Any] = {} try: jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} ) SCREAMING_SNAKE_CASE_: Any = math.ceil((result["total_count"] - 1_00) / 1_00 ) for i in range(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: str = requests.get(url + f"&page={i + 2}" ).json() jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} ) return jobs except Exception as e: print("Unknown error, could not fetch links." , _UpperCAmelCase ) return {} def A_ ( _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[Any] = {} if os.path.exists(_UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[str] = os.listdir(_UpperCAmelCase ) for file in files: try: with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , encoding="utf-8" ) as f: SCREAMING_SNAKE_CASE_: Dict = f.read() except UnicodeDecodeError as e: raise ValueError(f"Could not open {os.path.join(_UpperCAmelCase , _UpperCAmelCase )}." 
) from e return _artifact def A_ ( ): class __lowercase : """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : str): SCREAMING_SNAKE_CASE_: Dict = name SCREAMING_SNAKE_CASE_: List[str] = [] def __str__( self : Optional[Any]): return self.name def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : str): self.paths.append({"name": self.name, "path": path}) SCREAMING_SNAKE_CASE_: Dict[str, Artifact] = {} SCREAMING_SNAKE_CASE_: List[Any] = filter(os.path.isdir , os.listdir() ) for directory in directories: SCREAMING_SNAKE_CASE_: Dict = directory if artifact_name not in _available_artifacts: SCREAMING_SNAKE_CASE_: Tuple = Artifact(_UpperCAmelCase ) _available_artifacts[artifact_name].add_path(_UpperCAmelCase ) return _available_artifacts if __name__ == "__main__": lowerCAmelCase : Tuple = get_job_links() lowerCAmelCase : Optional[Any] = retrieve_available_artifacts() lowerCAmelCase : Any = collections.OrderedDict( [ ("""*.py""", """API Examples"""), ("""*.md""", """MD Examples"""), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' lowerCAmelCase : int = { v: { """failed""": [], """failures""": {}, } for v in docs.values() } # Link to the GitHub Action job lowerCAmelCase : Optional[int] = github_actions_job_links.get("""run_doctests""") lowerCAmelCase : List[Any] = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0] lowerCAmelCase : Any = retrieve_artifact(artifact_path["""name"""]) if "stats" in artifact: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = handle_test_results(artifact["""stats"""]) lowerCAmelCase : List[str] = failed lowerCAmelCase : Any = success lowerCAmelCase : Dict = time_spent[1:-1] + """, """ lowerCAmelCase : str = extract_first_line_failure(artifact["""failures_short"""]) for line in artifact["summary_short"].split("""\n"""): if re.search("""FAILED""", line): 
lowerCAmelCase : Tuple = line.replace("""FAILED """, """""") lowerCAmelCase : str = line.split()[0].replace("""\n""", """""") if "::" in line: lowerCAmelCase , lowerCAmelCase : Optional[int] = line.split("""::""") else: lowerCAmelCase , lowerCAmelCase : str = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): lowerCAmelCase : str = docs[file_regex] doc_test_results[category]["failed"].append(test) lowerCAmelCase : str = all_failures[test] if test in all_failures else """N/A""" lowerCAmelCase : Any = failure break lowerCAmelCase : Union[str, Any] = Message("""🤗 Results of the doc tests.""", doc_test_results) message.post() message.post_reply()
671
1
def matching_min_vertex_cover(graph: dict) -> set:
    """Return an APPROXIMATE minimum vertex cover of an undirected graph.

    Uses the classic matching-based 2-approximation: repeatedly pick an
    arbitrary remaining edge, add BOTH endpoints to the cover, and discard
    every edge touching either endpoint.

    :param graph: adjacency mapping ``node -> list of neighbour nodes``
        (expected symmetric, i.e. an undirected graph).
    :return: a set of vertices covering every edge of ``graph``.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> cover = matching_min_vertex_cover(graph)
    >>> all(u in cover or v in cover for u, vs in graph.items() for v in vs)
    True
    """
    chosen_vertices = set()
    # edges = set of the graph's (from, to) pairs; both directions are
    # present for an undirected graph, which is fine for this algorithm.
    edges = get_edges(graph)

    # While there are still edges, take an arbitrary edge (from_node,
    # to_node), add both extremities to chosen_vertices, then remove all
    # edges adjacent to from_node or to_node.
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        # Iterate over a copy because we mutate `edges` while scanning it.
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of directed ``(from_node, to_node)`` pairs of ``graph``.

    >>> sorted(get_edges({0: [1], 1: [0]}))
    [(0, 1), (1, 0)]
    """
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


# Backward-compatible alias: in the original (mangled) module both functions
# were named `A_`, so the surviving module-level binding was the edge helper.
A_ = get_edges

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
671
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

########################################################################
# Fully working example using Accelerate with gradient accumulation.
# Trains BERT-base on GLUE MRPC on single CPU/GPU, multi-GPU, TPU,
# fp16/bf16/fp8 mixed precision or fp32 — all with the same script.
# See: https://github.com/huggingface/accelerate/tree/main/examples
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build the MRPC train/validation dataloaders.

    :param accelerator: the ``Accelerator`` (used for process ordering and
        to pick padding strategy per distributed/mixed-precision mode).
    :param batch_size: per-device training batch size.
    :return: ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits
    # of the dataset, starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # Rename 'label' to 'labels', the name expected by transformers models.
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training
        # will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16.
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train and evaluate BERT on MRPC with gradient accumulation.

    :param config: dict with ``lr``, ``num_epochs``, ``seed``, ``batch_size``.
    :param args: parsed CLI namespace (``mixed_precision``,
        ``gradient_accumulation_steps``, ``cpu``).
    :raises NotImplementedError: when accumulation > 1 is requested on TPU.
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (built here so the seed also controls new weights).
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # The accelerator handles device placement, but manual placement must
    # happen before optimizer creation for TPU to work.
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything; unpack in the same order the objects were passed.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            batch.to(accelerator.device)
            # New code #
            # `accumulate` performs gradient accumulation; grads are only
            # synced/applied every `gradient_accumulation_steps` steps.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    """Parse CLI arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
671
1
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor

# Shared preprocessing: resize to 256x256 and normalize to [-1, 1].
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    """Convert a PIL image (or list of them) into a normalized batch tensor.

    Tensors are passed through untouched; PIL inputs are converted to RGB,
    resized/normalized by ``trans`` and stacked along the batch dimension.
    """
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    """DDIM pipeline that noises an input image to an intermediate timestep
    (controlled by ``strength``) and then denoises it back, for comparative
    analysis of the diffusion/denoising process.
    """

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        # `strength` selects how far along the noise schedule we start.
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        """Return the denoising timesteps and their count for `strength`."""
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        """Noise the preprocessed image to ``timestep`` and return latents."""
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run the noise-then-denoise analysis.

        :return: ``ImagePipelineOutput`` (or ``(images, latent_timestep)``
            tuple when ``return_dict`` is False).
        """
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance
            # depending on eta; eta corresponds to η in the DDIM paper and
            # should be in [0, 1]. Performs x_t -> x_t-1.
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
671
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS84 ellipsoid constants (meters).
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Return the great-circle distance in meters between two points.

    Uses the haversine formula with latitudes corrected for the Earth's
    flattening (converted to "reduced" latitudes on the WGS84 ellipsoid).

    :param lat1: latitude of point 1 in degrees
    :param lon1: longitude of point 1 in degrees
    :param lat2: latitude of point 2 in degrees
    :param lon2: longitude of point 2 in degrees
    :return: distance along the sphere of radius ``RADIUS``, in meters

    >>> haversine_distance(0.0, 0.0, 0.0, 0.0)
    0.0
    """
    # CONSTANTS per WGS84: reduce geodetic latitudes by the flattening
    # factor before applying the spherical haversine formula.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)

    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda

    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))

    return 2 * RADIUS * asin(h_value)


# Backward-compatible alias for the mangled public name.
A_ = haversine_distance

if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
1
import math


def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime (deterministic trial division).

    >>> is_prime(13)
    True
    >>> is_prime(9)
    False
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1, so only test those divisors.
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7).

    :param nth: 1-based index of the prime to return.
    :raises TypeError: if ``nth`` cannot be cast to int.
    :raises ValueError: if ``nth`` is not >= 1.

    >>> solution(6)
    13
    """
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")

    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]


# Backward-compatible alias for the mangled public name (the second `A_`
# definition — the nth-prime solver — was the surviving binding).
A_ = solution

if __name__ == "__main__":
    print(f"{solution() = }")
671
import argparse

import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint into a PyTorch state dict file.

    :param tf_checkpoint_path: path to the TensorFlow checkpoint.
    :param bert_config_file: JSON config describing the model architecture.
    :param pytorch_dump_path: where to save the resulting PyTorch weights.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


# Backward-compatible alias for the mangled public name.
A_ = convert_tf_checkpoint_to_pytorch

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
671
1
"""TensorFlow RegNet model (de-obfuscated: unique class names restored so that
internal references such as `TFRegNetConvLayer` actually resolve)."""

from typing import Optional, Tuple, Union

import tensorflow as tf

from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
    TFBaseModelOutputWithNoAttention,
    TFBaseModelOutputWithPoolingAndNoAttention,
    TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Convolution + batch norm + activation with explicit zero-padding (SAME-style)."""

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        # `None` activation means identity (used by the last conv of a residual block).
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet "stem": a single aggressive strided convolution over the pixel values."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        # Input is NCHW; validate the channel dim eagerly (can't check symbolic tensors).
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation channel attention (https://arxiv.org/abs/1709.01507)."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        # Names "attention.0"/"attention.2" mirror the PyTorch nn.Sequential indices
        # so that cross-framework weight conversion keys line up.
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        # Re-scale each channel of the input by its learned attention weight.
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    """
    RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            # Last conv has no activation; the nonlinearity is applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet's Y layer: an X layer with Squeeze-and-Excitation inserted before the final conv."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage composed of stacked layers."""

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stack of RegNet stages; optionally collects every stage's hidden state."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(
                TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}")
            )

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embedder + encoder + pooler; transposes NHWC internals back to NCHW outputs."""

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # NCHW spec; spatial size 224x224 matches the pretrained checkpoints.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


REGNET_START_DOCSTRING = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
671
import math


def is_prime(number: int) -> bool:
    """Return True if *number* is prime.

    Uses trial division with the 6k +/- 1 optimization.

    >>> is_prime(13)
    True
    >>> is_prime(21)
    False
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, math.isqrt(number) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Project Euler problem 58: return the side length of the square spiral for
    which the ratio of primes along both diagonals first falls below *ratio*.

    The spiral of side ``j`` has ``2*j - 1`` numbers on its diagonals; the corners
    of the ring with side ``j + 2`` are ``j*j + j + 1`` stepping by ``j + 1``
    (the last corner, a perfect square, is never prime and is skipped).

    >>> solution(0.5)
    11
    """
    j = 3
    primes = 3  # 3, 5 and 7 are prime among the first ring's corners
    while primes / (2 * j - 1) >= ratio:
        for corner in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            # bool is an int: adds 1 for each prime corner
            primes += is_prime(corner)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
1
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __lowercase ( UpperCAmelCase_ ): """simple docstring""" def __init__( self : Optional[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple=13 , lowerCAmelCase__ : List[str]=7 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : int=True , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : Optional[int]=False , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : Optional[Any]=2 , lowerCAmelCase__ : Optional[Any]=99 , lowerCAmelCase__ : List[Any]=0 , lowerCAmelCase__ : Tuple=32 , lowerCAmelCase__ : List[str]=5 , lowerCAmelCase__ : Optional[Any]=4 , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : List[str]=512 , lowerCAmelCase__ : Union[str, Any]=12 , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : Optional[int]=3 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : int="last" , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Tuple=None , ): SCREAMING_SNAKE_CASE_: List[str] = parent SCREAMING_SNAKE_CASE_: List[str] = batch_size SCREAMING_SNAKE_CASE_: Dict = seq_length SCREAMING_SNAKE_CASE_: Any = is_training SCREAMING_SNAKE_CASE_: 
List[str] = use_input_lengths SCREAMING_SNAKE_CASE_: str = use_token_type_ids SCREAMING_SNAKE_CASE_: Union[str, Any] = use_labels SCREAMING_SNAKE_CASE_: Tuple = gelu_activation SCREAMING_SNAKE_CASE_: Optional[Any] = sinusoidal_embeddings SCREAMING_SNAKE_CASE_: Any = causal SCREAMING_SNAKE_CASE_: Optional[Any] = asm SCREAMING_SNAKE_CASE_: Dict = n_langs SCREAMING_SNAKE_CASE_: Optional[Any] = vocab_size SCREAMING_SNAKE_CASE_: Dict = n_special SCREAMING_SNAKE_CASE_: List[str] = hidden_size SCREAMING_SNAKE_CASE_: Tuple = num_hidden_layers SCREAMING_SNAKE_CASE_: str = num_attention_heads SCREAMING_SNAKE_CASE_: Tuple = hidden_dropout_prob SCREAMING_SNAKE_CASE_: Dict = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_: List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE_: Optional[int] = type_vocab_size SCREAMING_SNAKE_CASE_: Union[str, Any] = type_sequence_label_size SCREAMING_SNAKE_CASE_: Optional[int] = initializer_range SCREAMING_SNAKE_CASE_: Optional[Any] = num_labels SCREAMING_SNAKE_CASE_: Dict = num_choices SCREAMING_SNAKE_CASE_: Optional[Any] = summary_type SCREAMING_SNAKE_CASE_: Any = use_proj SCREAMING_SNAKE_CASE_: int = scope def _SCREAMING_SNAKE_CASE ( self : List[str]): SCREAMING_SNAKE_CASE_: int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_: Optional[Any] = random_attention_mask([self.batch_size, self.seq_length]) SCREAMING_SNAKE_CASE_: List[Any] = None if self.use_input_lengths: SCREAMING_SNAKE_CASE_: Optional[Any] = ( ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length SCREAMING_SNAKE_CASE_: Optional[int] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs) SCREAMING_SNAKE_CASE_: List[Any] = None SCREAMING_SNAKE_CASE_: List[str] = None SCREAMING_SNAKE_CASE_: Dict = None if self.use_labels: SCREAMING_SNAKE_CASE_: Tuple = ids_tensor([self.batch_size] , 
self.type_sequence_label_size) SCREAMING_SNAKE_CASE_: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_: int = ids_tensor([self.batch_size] , 2).float() SCREAMING_SNAKE_CASE_: int = ids_tensor([self.batch_size] , self.num_choices) SCREAMING_SNAKE_CASE_: Optional[int] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def _SCREAMING_SNAKE_CASE ( self : List[str]): return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] , ): SCREAMING_SNAKE_CASE_: Optional[int] = FlaubertModel(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_: Dict = model(lowerCAmelCase__ , lengths=lowerCAmelCase__ , langs=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = model(lowerCAmelCase__ , langs=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = model(lowerCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , 
lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , ): SCREAMING_SNAKE_CASE_: str = FlaubertWithLMHeadModel(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_: int = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any , ): SCREAMING_SNAKE_CASE_: List[str] = FlaubertForQuestionAnsweringSimple(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_: Dict = model(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = model(lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict , ): SCREAMING_SNAKE_CASE_: str = FlaubertForQuestionAnswering(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_: List[str] = model(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = model( lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ 
, cls_index=lowerCAmelCase__ , is_impossible=lowerCAmelCase__ , p_mask=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_: str = model( lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , cls_index=lowerCAmelCase__ , is_impossible=lowerCAmelCase__ , ) ((SCREAMING_SNAKE_CASE_) , ): Optional[Any] = result_with_labels.to_tuple() SCREAMING_SNAKE_CASE_: int = model(lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__) ((SCREAMING_SNAKE_CASE_) , ): str = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , ()) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] , ): SCREAMING_SNAKE_CASE_: Optional[int] = FlaubertForSequenceClassification(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_: int = model(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = model(lowerCAmelCase__ , labels=lowerCAmelCase__) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , 
lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , ): SCREAMING_SNAKE_CASE_: str = self.num_labels SCREAMING_SNAKE_CASE_: int = FlaubertForTokenClassification(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_: str = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , ): SCREAMING_SNAKE_CASE_: Tuple = self.num_choices SCREAMING_SNAKE_CASE_: str = FlaubertForMultipleChoice(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() SCREAMING_SNAKE_CASE_: Union[str, Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() SCREAMING_SNAKE_CASE_: int = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() SCREAMING_SNAKE_CASE_: List[str] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() SCREAMING_SNAKE_CASE_: List[Any] = model( lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): SCREAMING_SNAKE_CASE_: Dict = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ): Any = config_and_inputs 
SCREAMING_SNAKE_CASE_: Any = { "input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths, "attention_mask": input_mask, } return config, inputs_dict @require_torch class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" _UpperCAmelCase : str = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) _UpperCAmelCase : Union[str, Any] = ( { '''feature-extraction''': FlaubertModel, '''fill-mask''': FlaubertWithLMHeadModel, '''question-answering''': FlaubertForQuestionAnsweringSimple, '''text-classification''': FlaubertForSequenceClassification, '''token-classification''': FlaubertForTokenClassification, '''zero-shot''': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple=False): SCREAMING_SNAKE_CASE_: List[Any] = super()._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": SCREAMING_SNAKE_CASE_: int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__) return inputs_dict def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): SCREAMING_SNAKE_CASE_: Optional[int] = FlaubertModelTester(self) SCREAMING_SNAKE_CASE_: Tuple = ConfigTester(self , config_class=lowerCAmelCase__ , emb_dim=37) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : int): SCREAMING_SNAKE_CASE_: Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Tuple): SCREAMING_SNAKE_CASE_: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Tuple): SCREAMING_SNAKE_CASE_: Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : List[Any]): SCREAMING_SNAKE_CASE_: Optional[Any] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): SCREAMING_SNAKE_CASE_: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCAmelCase__) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_: str = FlaubertModel.from_pretrained(lowerCAmelCase__) self.assertIsNotNone(lowerCAmelCase__) @slow @require_torch_gpu def _SCREAMING_SNAKE_CASE ( self : Optional[int]): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return SCREAMING_SNAKE_CASE_: str = True SCREAMING_SNAKE_CASE_: Tuple = model_class(config=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = torch.jit.trace( lowerCAmelCase__ , (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , "traced_model.pt")) SCREAMING_SNAKE_CASE_: int = torch.jit.load(os.path.join(lowerCAmelCase__ , "traced_model.pt") , map_location=lowerCAmelCase__) loaded(inputs_dict["input_ids"].to(lowerCAmelCase__) , inputs_dict["attention_mask"].to(lowerCAmelCase__)) @require_torch class __lowercase ( unittest.TestCase ): """simple docstring""" @slow def _SCREAMING_SNAKE_CASE ( self : Tuple): SCREAMING_SNAKE_CASE_: Optional[Any] = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased") SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) with torch.no_grad(): SCREAMING_SNAKE_CASE_: int = model(lowerCAmelCase__)[0] SCREAMING_SNAKE_CASE_: Dict = torch.Size((1, 11, 768)) self.assertEqual(output.shape , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-4))
671
import re


def split_input(str_: str) -> list[list[str]]:
    """Split on punctuation, then on whitespace, yielding a list of word lists."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """Concatenate every word capitalized, e.g. 'hello world' -> 'HelloWorld'."""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join words with `separator`, fully upper- or lower-cased per `upper`.

    Returns the sentinel string "not valid string" on IndexError (empty input).
    """
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    """PascalCase: every word capitalized, no separator."""
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    """camelCase: PascalCase with the first character lowered."""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    """snake_case (or SNAKE_CASE when `upper` is true)."""
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    """kebab-case (or KEBAB-CASE when `upper` is true)."""
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
671
1
import functools


def A_(worda: str, wordb: str) -> int:
    """Return the Levenshtein (edit) distance between `worda` and `wordb`.

    Uses top-down recursion over a pair of indices with `functools.cache`
    memoization, so the running time is O(len(worda) * len(wordb)).
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # First word exhausted: delete the remainder of the second word.
        if indexa >= len_worda:
            return len_wordb - indexb
        # Second word exhausted: delete the remainder of the first word.
        if indexb >= len_wordb:
            return len_worda - indexa
        # 1 if the current characters differ (substitution cost), else 0.
        diff = int(worda[indexa] != wordb[indexb])
        return min(
            1 + min_distance(indexa + 1, indexb),  # delete from first word
            1 + min_distance(indexa, indexb + 1),  # insert into first word
            diff + min_distance(indexa + 1, indexb + 1),  # substitute/match
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class __lowercase(PretrainedConfig):
    """Configuration for a UperNet semantic-segmentation model.

    Wraps a backbone configuration (defaulting to ResNet) together with the
    decode-head and auxiliary-head hyperparameters.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Rebuild the concrete backbone config class from its serialized dict.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
671
1
from itertools import count


def A_(min_block_length: int = 50) -> int:
    """Project Euler 115: least row length n whose fill-count exceeds one million.

    `fill_count_functions[n]` counts the ways to fill a row of length n with
    black blocks of length >= `min_block_length`, separated by at least one
    grey square (including the all-grey row).
    """
    # Rows shorter than the minimum block length admit only the empty filling.
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)  # index n: start from the all-grey row
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                # Block placed mid-row: multiply by fillings of the remainder
                # left of it (one grey separator consumed).
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1  # block flush against the row start
        if fill_count_functions[n] > 1_000_000:
            break
    return n


if __name__ == "__main__":
    print(f"{solution() = }")
671
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class __lowercase(unittest.TestCase):
    """Regression test: an optimizer wrapped by `Accelerator.prepare` must stay picklable."""

    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            # Round-trip through pickle; failure here means the wrapper broke pickling.
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        # Reset global accelerate state so later tests start clean.
        AcceleratorState._reset_state()
671
1
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class __lowercase(PreTrainedTokenizerFast):
    """Fast XLNet tokenizer, backed by a SentencePiece vocabulary.

    Pads on the left (XLNet convention) and uses the `A sep B sep cls`
    sequence layout with segment id 2 reserved for the CLS token.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3  # XLNet uses segment id 3 for padding
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Without the slow (SentencePiece) vocab file we cannot export a slow tokenizer.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: `A sep cls` or `A sep B sep cls`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for sequence A, 1 for sequence B, 2 for the CLS token."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece model file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
671
from itertools import count


def A_(min_block_length: int = 50) -> int:
    """Project Euler 115: least row length n whose fill-count exceeds one million.

    `fill_count_functions[n]` counts the ways to fill a row of length n with
    black blocks of length >= `min_block_length`, separated by at least one
    grey square (including the all-grey row).
    """
    # Rows shorter than the minimum block length admit only the empty filling.
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)  # index n: start from the all-grey row
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                # Block placed mid-row: multiply by fillings of the remainder
                # left of it (one grey separator consumed).
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1  # block flush against the row start
        if fill_count_functions[n] > 1_000_000:
            break
    return n


if __name__ == "__main__":
    print(f"{solution() = }")
671
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase : Tuple = """▁""" lowerCAmelCase : Optional[int] = {"""vocab_file""": """sentencepiece.bpe.model"""} lowerCAmelCase : int = { """vocab_file""": { """facebook/nllb-200-distilled-600M""": ( """https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model""" ), } } lowerCAmelCase : Tuple = { """facebook/nllb-200-distilled-600M""": 1024, } # fmt: off lowerCAmelCase : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", 
"""kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""] class __lowercase ( UpperCAmelCase_ ): 
"""simple docstring""" _UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES _UpperCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase : Any = ['''input_ids''', '''attention_mask'''] _UpperCAmelCase : List[int] = [] _UpperCAmelCase : List[int] = [] def __init__( self : Tuple , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple="<s>" , lowerCAmelCase__ : Any="</s>" , lowerCAmelCase__ : int="</s>" , lowerCAmelCase__ : Tuple="<s>" , lowerCAmelCase__ : Optional[int]="<unk>" , lowerCAmelCase__ : Any="<pad>" , lowerCAmelCase__ : Optional[Any]="<mask>" , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : str=None , lowerCAmelCase__ : Optional[Dict[str, Any]] = None , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Union[str, Any]=False , **lowerCAmelCase__ : Optional[Any] , ): # Mask token behave like a normal word, i.e. include the space before it SCREAMING_SNAKE_CASE_: List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__ , lowerCAmelCase__) else mask_token SCREAMING_SNAKE_CASE_: Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs SCREAMING_SNAKE_CASE_: Dict = legacy_behaviour super().__init__( bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowerCAmelCase__ , **lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE_: List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(lowerCAmelCase__)) SCREAMING_SNAKE_CASE_: List[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 
| 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token SCREAMING_SNAKE_CASE_: int = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab SCREAMING_SNAKE_CASE_: Dict = 1 SCREAMING_SNAKE_CASE_: Optional[Any] = len(self.sp_model) SCREAMING_SNAKE_CASE_: Optional[int] = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase__) } SCREAMING_SNAKE_CASE_: List[Any] = {v: k for k, v in self.lang_code_to_id.items()} SCREAMING_SNAKE_CASE_: Optional[int] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id) SCREAMING_SNAKE_CASE_: Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()} SCREAMING_SNAKE_CASE_: Tuple = list(self.lang_code_to_id.keys()) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens]) SCREAMING_SNAKE_CASE_: List[Any] = src_lang if src_lang is not None else "eng_Latn" SCREAMING_SNAKE_CASE_: Any = self.lang_code_to_id[self._src_lang] SCREAMING_SNAKE_CASE_: Optional[int] = tgt_lang self.set_src_lang_special_tokens(self._src_lang) def __getstate__( self : Any): SCREAMING_SNAKE_CASE_: Any = self.__dict__.copy() SCREAMING_SNAKE_CASE_: Union[str, Any] = None SCREAMING_SNAKE_CASE_: Dict = self.sp_model.serialized_model_proto() return state def __setstate__( self : str , lowerCAmelCase__ : Any): SCREAMING_SNAKE_CASE_: Tuple = d # for backward compatibility if not hasattr(self , "sp_model_kwargs"): SCREAMING_SNAKE_CASE_: List[Any] = {} SCREAMING_SNAKE_CASE_: Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]): return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def _SCREAMING_SNAKE_CASE ( self : Optional[int]): return self._src_lang @src_lang.setter def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : str): SCREAMING_SNAKE_CASE_: List[str] = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = [1] * len(self.prefix_tokens) SCREAMING_SNAKE_CASE_: Tuple = [1] * len(self.suffix_tokens) if token_ids_a is None: return prefix_ones + ([0] * len(lowerCAmelCase__)) + suffix_ones return prefix_ones + ([0] * len(lowerCAmelCase__)) + ([0] * len(lowerCAmelCase__)) + 
suffix_ones def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None): SCREAMING_SNAKE_CASE_: str = [self.sep_token_id] SCREAMING_SNAKE_CASE_: Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] , lowerCAmelCase__ : Optional[str] , **lowerCAmelCase__ : int): if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model") SCREAMING_SNAKE_CASE_: str = src_lang SCREAMING_SNAKE_CASE_: Tuple = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = self.convert_tokens_to_ids(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = tgt_lang_id return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): SCREAMING_SNAKE_CASE_: Dict = {self.convert_ids_to_tokens(lowerCAmelCase__): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : str): return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE_: Dict = self.sp_model.PieceToId(lowerCAmelCase__) # Need to 
return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : str): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Optional[int]): SCREAMING_SNAKE_CASE_: Optional[Any] = "".join(lowerCAmelCase__).replace(lowerCAmelCase__ , " ").strip() return out_string def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None): if not os.path.isdir(lowerCAmelCase__): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return SCREAMING_SNAKE_CASE_: List[str] = os.path.join( lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , lowerCAmelCase__) elif not os.path.isfile(self.vocab_file): with open(lowerCAmelCase__ , "wb") as fi: SCREAMING_SNAKE_CASE_: Optional[int] = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase__) return (out_vocab_file,) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str = "eng_Latn" , lowerCAmelCase__ : Optional[List[str]] = None , lowerCAmelCase__ : str = "fra_Latn" , **lowerCAmelCase__ : int , ): SCREAMING_SNAKE_CASE_: Dict = src_lang SCREAMING_SNAKE_CASE_: Union[str, Any] = tgt_lang return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): return self.set_src_lang_special_tokens(self.src_lang) def _SCREAMING_SNAKE_CASE ( self : Tuple): return self.set_tgt_lang_special_tokens(self.tgt_lang) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : int): 
SCREAMING_SNAKE_CASE_: Optional[int] = self.lang_code_to_id[src_lang] if self.legacy_behaviour: SCREAMING_SNAKE_CASE_: Any = [] SCREAMING_SNAKE_CASE_: Optional[int] = [self.eos_token_id, self.cur_lang_code] else: SCREAMING_SNAKE_CASE_: str = [self.cur_lang_code] SCREAMING_SNAKE_CASE_: str = [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : str): SCREAMING_SNAKE_CASE_: str = self.lang_code_to_id[lang] if self.legacy_behaviour: SCREAMING_SNAKE_CASE_: Union[str, Any] = [] SCREAMING_SNAKE_CASE_: str = [self.eos_token_id, self.cur_lang_code] else: SCREAMING_SNAKE_CASE_: int = [self.cur_lang_code] SCREAMING_SNAKE_CASE_: Any = [self.eos_token_id]
671
def A_(num: int) -> int:
    """Return the largest value obtainable by deleting exactly one digit of `num`.

    Works on the absolute value, so the sign of `num` is ignored.

    Raises:
        TypeError: if `num` is not an int.
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    # One copy of the digit list per deletable position.
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        # Remove a different digit from each copy.
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
671
1
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    """Lomuto-style partition of a[left_index:right_index] around a[left_index].

    Returns the final index of the pivot.
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    # Move the pivot into its sorted position.
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    """In-place quicksort of a[left:right] with a uniformly random pivot."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    """Read comma-separated integers from stdin, sort them, print the result."""
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
671
from __future__ import annotations from collections.abc import Iterator from typing import Any class __lowercase : """simple docstring""" def __init__( self : List[str] , lowerCAmelCase__ : Any): SCREAMING_SNAKE_CASE_: Any = data SCREAMING_SNAKE_CASE_: Node | None = None class __lowercase : """simple docstring""" def __init__( self : int): SCREAMING_SNAKE_CASE_: Dict = None SCREAMING_SNAKE_CASE_: str = None def __iter__( self : List[str]): SCREAMING_SNAKE_CASE_: Tuple = self.head while self.head: yield node.data SCREAMING_SNAKE_CASE_: List[str] = node.next if node == self.head: break def __len__( self : Dict): return sum(1 for _ in self) def __repr__( self : Dict): return "->".join(str(lowerCAmelCase__) for item in iter(self)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any): self.insert_nth(len(self) , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any): self.insert_nth(0 , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Any): if index < 0 or index > len(self): raise IndexError("list index out of range.") SCREAMING_SNAKE_CASE_: Any = Node(lowerCAmelCase__) if self.head is None: SCREAMING_SNAKE_CASE_: str = new_node # first node points itself SCREAMING_SNAKE_CASE_: Optional[Any] = new_node elif index == 0: # insert at head SCREAMING_SNAKE_CASE_: Optional[Any] = self.head SCREAMING_SNAKE_CASE_: str = new_node else: SCREAMING_SNAKE_CASE_: int = self.head for _ in range(index - 1): SCREAMING_SNAKE_CASE_: Optional[Any] = temp.next SCREAMING_SNAKE_CASE_: List[str] = temp.next SCREAMING_SNAKE_CASE_: int = new_node if index == len(self) - 1: # insert at tail SCREAMING_SNAKE_CASE_: Any = new_node def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return self.delete_nth(0) def _SCREAMING_SNAKE_CASE ( self : Any): return self.delete_nth(len(self) - 1) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : int = 0): if not 0 <= index < 
len(self): raise IndexError("list index out of range.") SCREAMING_SNAKE_CASE_: Optional[Any] = self.head if self.head == self.tail: # just one node SCREAMING_SNAKE_CASE_: List[str] = None elif index == 0: # delete head node SCREAMING_SNAKE_CASE_: int = self.tail.next.next SCREAMING_SNAKE_CASE_: Tuple = self.head.next else: SCREAMING_SNAKE_CASE_: Optional[int] = self.head for _ in range(index - 1): SCREAMING_SNAKE_CASE_: Any = temp.next SCREAMING_SNAKE_CASE_: Optional[Any] = temp.next SCREAMING_SNAKE_CASE_: int = temp.next.next if index == len(self) - 1: # delete at tail SCREAMING_SNAKE_CASE_: int = temp return delete_node.data def _SCREAMING_SNAKE_CASE ( self : Optional[Any]): return len(self) == 0 def A_ ( ): SCREAMING_SNAKE_CASE_: Dict = CircularLinkedList() assert len(_UpperCAmelCase ) == 0 assert circular_linked_list.is_empty() is True assert str(_UpperCAmelCase ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(_UpperCAmelCase ) == i circular_linked_list.insert_nth(_UpperCAmelCase , i + 1 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(_UpperCAmelCase ) == 
"->".join(str(_UpperCAmelCase ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert str(_UpperCAmelCase ) == "->".join(str(_UpperCAmelCase ) for i in range(1 , 6 ) ) assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
671
1
# NOTE(review): every binding below reuses the single name `lowerCAmelCase`
# (an artifact of mechanical renaming), so only the FINAL assignment survives
# at module level.  The upstream file presumably gave each frozenset of
# pipeline-call parameters its own constant name — TODO confirm against the
# source repository.  The obfuscated type annotations were dropped: their
# typing names are not importable from this fragment.
lowerCAmelCase = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
lowerCAmelCase = frozenset(["prompt", "negative_prompt"])
lowerCAmelCase = frozenset([])
lowerCAmelCase = frozenset(["image"])
lowerCAmelCase = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)
lowerCAmelCase = frozenset(["image"])
lowerCAmelCase = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
lowerCAmelCase = frozenset(["prompt", "image", "negative_prompt"])
lowerCAmelCase = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
lowerCAmelCase = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
lowerCAmelCase = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
lowerCAmelCase = frozenset(["image", "mask_image"])
lowerCAmelCase = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
lowerCAmelCase = frozenset(["example_image", "image", "mask_image"])
lowerCAmelCase = frozenset(["class_labels"])
lowerCAmelCase = frozenset(["class_labels"])
lowerCAmelCase = frozenset(["batch_size"])
lowerCAmelCase = frozenset([])
lowerCAmelCase = frozenset(["batch_size"])
lowerCAmelCase = frozenset([])
lowerCAmelCase = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
lowerCAmelCase = frozenset(["prompt", "negative_prompt"])
lowerCAmelCase = frozenset(["input_tokens"])
lowerCAmelCase = frozenset(["input_tokens"])
671
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Project Euler 174: count tile totals t <= t_limit expressible as a
    hollow square lamina in between 1 and n_limit distinct ways.

    For each outer square width, every same-parity hole width gives a lamina
    using outer**2 - hole**2 tiles; occurrences per tile count are tallied.
    """
    # defaultdict(int): fixes the obfuscated defaultdict(_UpperCAmelCase),
    # which passed the t_limit argument as the factory.
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # Smallest hole keeping the tile count within t_limit.
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # Hole width must share the outer width's parity.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    # Use the n_limit parameter (was hardcoded to 10 in the original).
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
671
1
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Project Euler 174: count tile totals t <= t_limit expressible as a
    hollow square lamina in between 1 and n_limit distinct ways.

    For each outer square width, every same-parity hole width gives a lamina
    using outer**2 - hole**2 tiles; occurrences per tile count are tallied.
    """
    # defaultdict(int): fixes the obfuscated defaultdict(_UpperCAmelCase),
    # which passed the t_limit argument as the factory.
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # Smallest hole keeping the tile count within t_limit.
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # Hole width must share the outer width's parity.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    # Use the n_limit parameter (was hardcoded to 10 in the original).
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
671
# Lazy-import scaffolding for the XLM model family.  Restores the
# `_import_structure` name that the `_LazyModule(...)` call at the bottom
# references (the obfuscated version rebound everything to `lowerCAmelCase`,
# leaving `_import_structure` undefined).
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
1
# Lazy-import scaffolding for the MLuke tokenizer.  Restores the
# `_import_structure` name that `_LazyModule(...)` references (the
# obfuscated version rebound it to `lowerCAmelCase`).
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer

else:
    import sys

    # Replace this module with a lazy proxy so sentencepiece loads on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
# Restores the names the __main__ block references: `demo_graph`,
# `bfs_shortest_path`, `bfs_shortest_path_distance` (the obfuscated version
# bound them to `lowerCAmelCase`/`A_`, so the prints raised NameError).
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return the shortest path from start to goal as a list of nodes.

    Returns [] when no path exists; returns [start] when start == goal.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest start->target path, or -1."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    # NOTE: set(start) adds the characters of a string start, mirroring the
    # original; start itself is still never re-enqueued in practice because
    # dist[start] is seeded below and neighbours are filtered via `visited`.
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
671
1
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint into a PyTorch state-dict file.

    Renamed from the obfuscated `A_` to match the call in the __main__ block.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
671
from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class __lowercase ( UpperCAmelCase_ ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : float): return 0.0 def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: List[str] = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) SCREAMING_SNAKE_CASE_: Dict = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = 5_12 SCREAMING_SNAKE_CASE_: str = [1] + [0] * (size - 1) SCREAMING_SNAKE_CASE_: Dict = [filter_type.process(_UpperCAmelCase ) for item in inputs] SCREAMING_SNAKE_CASE_: Optional[Any] = [0] * (samplerate - size) # zero-padding outputs += filler SCREAMING_SNAKE_CASE_: Tuple = np.abs(np.fft.fft(_UpperCAmelCase ) ) SCREAMING_SNAKE_CASE_: Optional[Any] = 20 * np.logaa(_UpperCAmelCase ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel("Frequency (Hz)" ) plt.xscale("log" ) # Display within reasonable bounds SCREAMING_SNAKE_CASE_: Any = get_bounds(_UpperCAmelCase , _UpperCAmelCase ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel("Gain (dB)" ) plt.plot(_UpperCAmelCase ) plt.show() def A_ ( _UpperCAmelCase , _UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = 5_12 SCREAMING_SNAKE_CASE_: Union[str, Any] = [1] + [0] * (size - 1) SCREAMING_SNAKE_CASE_: Dict = [filter_type.process(_UpperCAmelCase ) for item in inputs] SCREAMING_SNAKE_CASE_: int = [0] * (samplerate - size) # zero-padding outputs += filler SCREAMING_SNAKE_CASE_: Any = np.angle(np.fft.fft(_UpperCAmelCase ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel("Frequency (Hz)" ) plt.xscale("log" ) plt.ylim(-2 * pi , 2 * pi ) plt.ylabel("Phase shift (Radians)" ) 
plt.plot(np.unwrap(_UpperCAmelCase , -2 * pi ) ) plt.show()
671
1
class Node:
    """Binary-search-tree node; duplicates are silently dropped by insert."""

    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        """Insert val into the subtree rooted here, keeping BST order."""
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """Recursive in-order traversal appending values to res."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort arr by building a BST and reading it back in order.

    Note: equal elements collapse to one occurrence (BST insert ignores
    duplicates), matching the original algorithm.
    """
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
671
from __future__ import annotations from math import ceil, floor, sqrt def A_ ( _UpperCAmelCase = 2_00_00_00 ): SCREAMING_SNAKE_CASE_: list[int] = [0] SCREAMING_SNAKE_CASE_: int for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target SCREAMING_SNAKE_CASE_: int = 0 # the area corresponding to the grid that gives the product closest to target SCREAMING_SNAKE_CASE_: int = 0 # an estimate of b, using the quadratic formula SCREAMING_SNAKE_CASE_: float # the largest integer less than b_estimate SCREAMING_SNAKE_CASE_: int # the largest integer less than b_estimate SCREAMING_SNAKE_CASE_: int # the triangle number corresponding to b_floor SCREAMING_SNAKE_CASE_: int # the triangle number corresponding to b_ceil SCREAMING_SNAKE_CASE_: int for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): SCREAMING_SNAKE_CASE_: List[Any] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 SCREAMING_SNAKE_CASE_: Any = floor(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] = ceil(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Any = triangle_numbers[b_floor] SCREAMING_SNAKE_CASE_: List[Any] = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): SCREAMING_SNAKE_CASE_: int = triangle_b_first_guess * triangle_a SCREAMING_SNAKE_CASE_: int = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): SCREAMING_SNAKE_CASE_: Optional[Any] = triangle_b_second_guess * triangle_a SCREAMING_SNAKE_CASE_: Tuple = idx_a * b_ceil return area if __name__ == "__main__": print(f'''{solution() = }''')
671
1
# Restores the names the __main__ block references: `demo_graph`,
# `bfs_shortest_path`, `bfs_shortest_path_distance` (the obfuscated version
# bound them to `lowerCAmelCase`/`A_`, so the prints raised NameError).
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list:
    """Return the shortest path from start to goal as a list of nodes.

    Returns [] when no path exists; returns [start] when start == goal.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest start->target path, or -1."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    # NOTE: set(start) adds the characters of a string start, mirroring the
    # original; start itself is still never re-enqueued in practice because
    # dist[start] is seeded below and neighbours are filtered via `visited`.
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
671
# Lazy-import scaffolding for the Longformer model family.  Restores the
# `_import_structure` name that `_LazyModule(...)` references (the
# obfuscated version rebound everything to `lowerCAmelCase`).
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]


if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
1
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


class MobileNetVaImageProcessingTester(unittest.TestCase):
    """Builds image-processor kwargs and sizing info for the tests below.

    Restored class name: the obfuscated dump named both classes `__lowercase`,
    while `setUp` below instantiates `MobileNetVaImageProcessingTester`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    # NOTE(review): method names below are reconstructed from the standard
    # transformers image-processing test layout of this era — confirm against
    # the upstream test file.

    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
671
# Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint.
# Only the UNet, VAE, and text encoder are converted; optimizer state and
# everything else is ignored.
#
# NOTE(review): the original assignment targets were obfuscated into placeholder
# names; they have been restored from the read-sites visible in this file
# (append calls, loop headers, return statements, __main__ call sites).

import argparse
import os.path as osp
import re

import torch
from safetensors.torch import load_file, save_file

# =================#
# UNet Conversion #
# =================#

unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers) — substring pairs applied to resnet keys only
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def convert_unet_state_dict(unet_state_dict):
    """Rename Diffusers UNet keys to Stable Diffusion (CompVis) key names."""
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]


def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    """Rename Diffusers VAE keys to SD names and reshape attention weights."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}


def convert_text_enc_state_dict_vaa(text_enc_dict):
    """Convert a v2.x (OpenCLIP) text encoder state dict to SD format.

    Separate q/k/v projections are gathered per layer and concatenated into
    the fused in_proj tensors the SD checkpoint layout expects.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            # single character 'q'/'k'/'v' identifying which projection this is
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        # NOTE(review): target-key suffixes were lost in the obfuscated source;
        # restored the fused-projection names used by the SD checkpoint layout.
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    """v1 (CLIP) text encoders need no key renames."""
    return text_enc_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
671
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: submodule name -> list of public names it provides.
# NOTE(review): the obfuscated source rebound a single placeholder variable in
# every optional-backend branch; restored keyed `_import_structure` updates,
# which is the structure `_LazyModule` below consumes. Submodule keys are taken
# from the TYPE_CHECKING import paths in this same file.
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Map of checkpoint id -> hosted config.json.
# NOTE(review): variable name was obfuscated; restored per the transformers
# naming convention for this model family — confirm against the original file.
XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class __lowercase(PretrainedConfig):
    """Configuration for an XLM-ProphetNet-style encoder-decoder model.

    NOTE(review): the class name is an obfuscation placeholder (presumably
    XLMProphetNetConfig); kept as-is to preserve the visible interface. The
    base class is restored to the imported `PretrainedConfig`, which the
    obfuscated source referenced via an undefined alias. The obfuscated
    `__init__` reused one placeholder for every parameter (duplicate argument
    names — a SyntaxError); real names are restored from the body's read-sites.
    """

    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        # Total depth = encoder layers + decoder layers.
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
671
1
import json
import os
from collections import Counter

import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset


# Maps num_image_embeds -> (rows, cols) grid used by the adaptive pooling layer.
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    """CNN backbone that turns an image into `args.num_image_embeds` 2048-d embeddings.

    NOTE(review): both classes in the obfuscated source shared one placeholder
    name (the second shadowed the first), so distinct names were restored.
    """

    def __init__(self, args):
        super().__init__()
        # NOTE(review): the obfuscated source read `torchvision.models.resnetaaa`,
        # which does not exist (digits were mangled); resnet152 matches the
        # 2048-channel feature map described in forward() — confirm against the
        # original training setup.
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        # `AdaptiveAvgPoolad` in the obfuscated source is the mangled AdaptiveAvgPool2d.
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    """Dataset over a .jsonl file whose records carry "text", "img", and "label"."""

    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        # Split off the special tokens so they can bracket the image embeddings.
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        # NOTE(review): the subscript was lost in the obfuscated source (it read
        # `... = 1` with no index); restored the multi-hot scatter implied by
        # the surrounding code — confirm against the original.
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        """Count how often each label string occurs across the dataset."""
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    """Pad sentences to the batch max length and stack image/label tensors."""
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        # NOTE(review): the slices were lost in the obfuscated source; restored
        # per-row prefix writes, with mask=1 marking real (non-pad) positions.
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    """Fixed list of genre labels for the MM-IMDB task."""
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]


def get_image_transforms():
    """Standard 224x224 center-crop pipeline with dataset-specific normalization."""
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
671
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    """Pairwise squared Euclidean distances between rows of `a` and rows of `b`."""
    b = b.T
    aa = np.sum(np.square(a), axis=1)
    ba = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    # (n, m) matrix of ||a_i||^2 - 2 a_i.b_j + ||b_j||^2
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d


def color_quantize(x, clusters):
    """Map each RGB pixel in `x` to the index of its nearest cluster center."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class __lowercase(BaseImageProcessor):
    """Image processor that resizes, normalizes to [-1, 1], and optionally
    color-quantizes images into cluster indices ("pixel tokens").

    NOTE(review): the class name is an obfuscation placeholder; the base class
    is restored to the imported `BaseImageProcessor`, which the obfuscated
    source referenced via an undefined alias. Method and parameter names are
    restored from call sites in preprocess() (`self.resize`, `self.normalize`).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to the (height, width) given in `size`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Scale pixel values from [0, 255] into the [-1, 1] range."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the full pipeline (resize -> normalize -> optional color quantize)
        and return a BatchFeature keyed "input_ids"."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        # NOTE(review): np.array(None) yields a 0-d object array, so the
        # clusters-is-None check below can never fire after this line; kept
        # as in the source to preserve behavior.
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # NOTE(review): `and` binds tighter than `or` here, so this also raises
        # when resample is None regardless of do_resize; kept as in the source.
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
671
1
from ..utils import DummyObject, requires_backends


class __lowercase(metaclass=DummyObject):
    """Placeholder emitted when the `onnx` backend is unavailable.

    Constructing the object (or calling its factory classmethods) goes through
    `requires_backends`, which presumably raises a helpful import error instead
    of failing at module-import time.
    NOTE(review): the metaclass was an undefined obfuscation alias; restored to
    the imported `DummyObject`. The class name itself is a placeholder.
    """

    # Backends the real implementation needs; presumably read by DummyObject — confirm.
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        # NOTE(review): the two classmethods shared one obfuscated name, so the
        # second silently shadowed the first; restored the conventional
        # from_config/from_pretrained pair used by dummy backend objects.
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
671
"""Tokenization classes for DPR (Dense Passage Retrieval).

NOTE(review): the original file was name-mangled into an un-importable state — every
module constant was assigned to the single name `lowerCAmelCase`, every class was named
`__lowercase`, and `__call__` declared all of its parameters with the same name
`lowerCAmelCase__` (a duplicate-argument SyntaxError). Constant, class, and parameter
names below are reconstructed from the references the bodies themselves make
(`VOCAB_FILES_NAMES`, `self._get_best_spans`, `titles`/`texts`/`questions`, the
`.decode_best_spans` mention in the docstring, the `DPRSpanPrediction` constructor
call) — verify against the upstream DPR tokenizer.
"""
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizer(BertTokenizer):
    """Tokenizer for the DPR context encoder — a BertTokenizer with DPR vocabulary files."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    """Tokenizer for the DPR question encoder — a BertTokenizer with DPR vocabulary files."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


# (span_score, relevance_score, doc_id, start_index, end_index, text) for one predicted answer span.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

# Raw reader model outputs consumed by `decode_best_spans`.
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to
    `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence
    of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size
    `(n_passages, sequence_length)` with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
              to the maximum acceptable input length for the model if that argument is not provided. This will
              truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences
              (or a batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
              maximum acceptable input length for the model if that argument is not provided. This will only truncate
              the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to
            `None`, this will use the predefined model maximum length if a maximum length is required by one of the
            truncation/padding parameters. If the model has no specific maximum input length (like XLNet)
            truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """Mixin adding DPR reader batch encoding and best-span decoding on top of a tokenizer."""

    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        # No passages at all: behave exactly like the plain tokenizer on the questions.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        # Only one of titles/texts given: encode it as the second sequence of a pair.
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # One question may be broadcast over all passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        # Concatenate "[CLS] question [SEP] title [SEP]" with the passage text ids, truncating if asked.
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # 1 for real tokens, 0 for padding.
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Extract the `num_spans` best answer spans from the reader's logits.

        Passages are visited from most to least relevant; within each passage at most
        `num_spans_per_passage` non-overlapping spans of length <= `max_answer_length`
        are kept, scored by `start_logit + end_logit`.
        """
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        # Visit passages by decreasing relevance score.
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                # Span indices were computed relative to the passage; shift back to the full sequence.
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Greedily pick the `top_spans` highest-scoring, mutually non-overlapping spans."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            # Only ends within `max_answer_length` tokens of the start are candidates.
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            # Skip any span overlapping one already chosen.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    """DPR reader tokenizer: BertTokenizer plus DPR-specific encoding/span decoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
671
1
# Lazy-import init for the EfficientFormer model family: symbols are registered in
# `_import_structure` and only materialized on first access via `_LazyModule`.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

# Vision-dependent exports (image processor).
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

# PyTorch-dependent exports.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

# TensorFlow-dependent exports.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module is used instead.
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load only on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    """Runs the shared BERT tokenization test-suite against the DistilBERT tokenizers.

    NOTE(review): the obfuscated original subclassed an undefined `UpperCAmelCase_` while
    the imported `BertTokenizationTest` went unused, and its three class attributes all
    shared one name; restored here to the conventional test-suite attributes — verify.
    """

    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        """Check special-token layout produced by `build_inputs_with_special_tokens`."""
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        # Encode without special tokens so we can verify their placement explicitly.
        # NOTE(review): the mangled source passed an undefined name for
        # `add_special_tokens`; `False` is the only value consistent with the asserts below.
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        # Single sequence: [CLS] tokens [SEP]
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        # Pair: [CLS] tokens_a [SEP] tokens_b [SEP]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
671
1
# Tests for the repo's `check_copies` utility (the "# Copied from ..." consistency checker
# and the localized-README model-list converter).
#
# NOTE(review): this file has been machine-renamed and is broken as written — several
# names referenced below (`git_repo_path`, `REFERENCE_CODE`, `self.transformer_dir`,
# `comment`, `class_name`, `class_code`, `overwrite_result`, `lowerCAmelCase__`) are never
# bound because their assignments were rewritten to `lowerCAmelCase` / `SCREAMING_SNAKE_CASE_`.
# Comments mark the presumed intent; code is left byte-identical.
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


# Presumably the repository root (three directories up from this test file) — but the
# result is bound to `lowerCAmelCase`, while the next line reads `git_repo_path`.
lowerCAmelCase : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
# NOTE(review): `git_repo_path` is undefined here — NameError at import time as written.
sys.path.append(os.path.join(git_repo_path, """utils"""))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
# NOTE(review): bound to `lowerCAmelCase` although the tests below reference `REFERENCE_CODE`.
lowerCAmelCase : List[str] = """
    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
"""


class __lowercase ( unittest.TestCase ):
    """simple docstring"""

    # Presumably `setUp`: create a temp "transformers" tree and copy modeling_bert.py into it.
    # NOTE(review): the mkdtemp result never reaches `self.transformer_dir`, and
    # `lowerCAmelCase__` below is an unbound name (presumably `git_repo_path`).
    # Also: all six methods share the name `_SCREAMING_SNAKE_CASE`, so only the last
    # definition survives on the class — unittest will not run the earlier ones.
    def _SCREAMING_SNAKE_CASE ( self : int):
        SCREAMING_SNAKE_CASE_: Optional[int] = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , "models/bert/"))
        SCREAMING_SNAKE_CASE_: List[Any] = self.transformer_dir
        shutil.copy(
            os.path.join(lowerCAmelCase__ , "src/transformers/models/bert/modeling_bert.py") ,
            os.path.join(self.transformer_dir , "models/bert/modeling_bert.py") , )

    # Presumably `tearDown`: reset the dir attribute and delete the temp tree.
    def _SCREAMING_SNAKE_CASE ( self : List[Any]):
        SCREAMING_SNAKE_CASE_: Optional[Any] = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    # Presumably `check_copy_consistency(comment, class_name, class_code, overwrite_result=None)`:
    # write a candidate class to a temp file, then assert `check_copies.is_copy_consistent`
    # finds no diff (or, with `overwrite_result`, that overwriting yields the expected code).
    # NOTE(review): the body references `comment`, `class_name`, `class_code`,
    # `overwrite_result` — none of which match the mangled parameter names — and
    # `black.TargetVersion.PYaa` is not a valid target version (presumably PY38/PY35).
    # `self.assertTrue(f.read() , lowerCAmelCase__)` also looks like it was meant to be
    # `assertEqual` — assertTrue's second argument is only a message.
    def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : str=None):
        SCREAMING_SNAKE_CASE_: List[str] = comment + F"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            SCREAMING_SNAKE_CASE_: List[str] = comment + F"\nclass {class_name}(nn.Module):\n" + overwrite_result
        SCREAMING_SNAKE_CASE_: int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119)
        SCREAMING_SNAKE_CASE_: Optional[int] = black.format_str(lowerCAmelCase__ , mode=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: int = os.path.join(self.transformer_dir , "new_code.py")
        with open(lowerCAmelCase__ , "w" , newline="\n") as f:
            f.write(lowerCAmelCase__)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase__)) == 0)
        else:
            check_copies.is_copy_consistent(f.name , overwrite=lowerCAmelCase__)
            with open(lowerCAmelCase__ , "r") as f:
                self.assertTrue(f.read() , lowerCAmelCase__)

    # Presumably `test_find_code_in_transformers`: locate BertLMPredictionHead's source and
    # compare it against the reference code above.
    def _SCREAMING_SNAKE_CASE ( self : Dict):
        SCREAMING_SNAKE_CASE_: int = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)

    # Presumably `test_copy_consistency`: exercise the "# Copied from" checker for the base
    # case, trailing-newline handling, `Bert->X` renames, long names, and overwrite mode.
    # NOTE(review): several arguments that should be `REFERENCE_CODE` (or derived from it)
    # were mangled to `lowerCAmelCase__`.
    def _SCREAMING_SNAKE_CASE ( self : List[Any]):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" ,
            "BertLMPredictionHead" ,
            REFERENCE_CODE + "\n" , )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" ,
            "BertLMPredictionHead" ,
            lowerCAmelCase__ , )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" ,
            "TestModelLMPredictionHead" ,
            re.sub("Bert" , "TestModel" , lowerCAmelCase__) , )
        # Copy consistency with a really long name
        SCREAMING_SNAKE_CASE_: List[str] = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            F"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" ,
            F"{long_class_name}LMPredictionHead" ,
            re.sub("Bert" , lowerCAmelCase__ , lowerCAmelCase__) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" ,
            "TestModelLMPredictionHead" ,
            lowerCAmelCase__ ,
            overwrite_result=re.sub("Bert" , "TestModel" , lowerCAmelCase__) , )

    # Presumably `test_convert_to_localized_md`: feed English model-list entries plus an
    # existing localized (zh-hans) list into `convert_to_localized_md` and check that
    # missing entries are added and stale links are synchronized.
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
        SCREAMING_SNAKE_CASE_: Optional[Any] = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        # English source list with three models (ALBERT, DistilBERT, ELECTRA).
        SCREAMING_SNAKE_CASE_: Any = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
            " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
            " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
            " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
            " Luong, Quoc V. Le, Christopher D. Manning."
        )
        # Existing localized list containing only ALBERT.
        SCREAMING_SNAKE_CASE_: str = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        # Expected localized list after conversion (DistilBERT and ELECTRA appended).
        SCREAMING_SNAKE_CASE_: Union[str, Any] = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
            " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
            " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
            " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
            " Christopher D. Manning 发布。\n"
        )
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = check_copies.convert_to_localized_md(
            lowerCAmelCase__ , lowerCAmelCase__ , localized_readme["format_model_list"])
        self.assertFalse(lowerCAmelCase__)
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = check_copies.convert_to_localized_md(
            lowerCAmelCase__ , lowerCAmelCase__ , localized_readme["format_model_list"])
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(lowerCAmelCase__)
        # English entry whose localized counterpart carries an out-of-date link.
        SCREAMING_SNAKE_CASE_: Dict = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
        )
        SCREAMING_SNAKE_CASE_: Union[str, Any] = (
            "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
            " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        SCREAMING_SNAKE_CASE_: Union[str, Any] = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = check_copies.convert_to_localized_md(
            lowerCAmelCase__ , lowerCAmelCase__ , localized_readme["format_model_list"])
        # Check if the model link is synchronized.
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
671
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict

import requests
from slack_sdk import WebClient

# Single Slack client for the whole script, authenticated via the CI secret.
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])


def handle_test_results(test_results):
    """Parse a pytest summary line into ``(failed, success, time_spent)``.

    ``test_results`` is pytest's final status line, e.g.
    ``"== 2 failed, 10 passed in 0:01:02 =="``.
    """
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    """Map each failing doctest file to the first line of its error message.

    ``failures_short_lines`` is the content of pytest's ``failures_short`` report.
    """
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            # Header line format puts the file path in the third whitespace field.
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            # First non-numbered line after the doctest header is the error summary.
            failures[file] = line
            in_error = False

    return failures


class Message:
    """Builds and posts the Slack report for a doc-test CI run."""

    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

        # Set by `post`; `post_reply` needs it to thread replies under the main message.
        self.thread_ts = None

    @property
    def time(self) -> str:
        """Total runtime formatted as ``XhYmZs``."""
        time_spent = [self._time_spent]
        total_secs = 0

        for time_ in time_spent:
            time_parts = time_.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        """One mrkdwn section listing failed tests grouped by doc category."""
        line_length = 40
        # Only per-category sub-dicts have a "failed" list; top-level scalar
        # entries (success/failures/time_spent/job_link) are skipped.
        category_failures = {k: v["failed"] for k, v in self.doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        """JSON-encoded list of Slack blocks for the main message."""
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        """Post a generic error message when the test run itself crashed."""
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        # `payload` is already a list of blocks (not a JSON string), so dump it directly;
        # calling json.loads on it would raise TypeError.
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        """Post the main report and remember its response so replies can be threaded."""
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        # The response carries the "ts" used by post_reply to thread messages.
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        """Build the Slack blocks for one job's failure details reply."""
        failures_text = ""
        for key, value in failures.items():
            # Slack rejects overly long messages; truncate each failure body.
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        """Post one threaded reply per doc category that had failures."""
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        # Remove the scalar bookkeeping entries; only per-category dicts remain.
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                # Be gentle with the Slack rate limit.
                time.sleep(1)


def get_job_links():
    """Extract job names and their HTML links from the current GitHub Actions run."""
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # First page already fetched 100 jobs; page through the remainder.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name):
    """Read every text file of a downloaded artifact directory into a dict keyed by file stem."""
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    """Scan the working directory and collect each artifact directory with its paths."""

    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts


if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
671
1
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os

from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
    DYNAMO_BACKENDS,
    _ask_field,
    _ask_options,
    _convert_dynamo_backend,
    _convert_mixed_precision,
    _convert_sagemaker_distributed_mode,
    _convert_yes_no_to_bool,
)


if is_boto3_available():
    import boto3  # noqa: F401


def _create_iam_role_for_sagemaker(role_name):
    """Create an IAM role SageMaker can assume, with the training-job permissions attached.

    If a role with ``role_name`` already exists it is reused.
    """
    iam_client = boto3.client("iam")

    # Trust policy allowing the SageMaker service to assume the role.
    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    """Return the ARN for an existing IAM role."""
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]


def get_sagemaker_input():
    """Interactively collect SageMaker launch settings and return a SageMakerConfig."""
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )

    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        # Config keys carry a "dynamo_" prefix so they can be flattened into the launch args.
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
671
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """Build train/eval dataloaders for GLUE MRPC, tokenized with bert-base-cased.

    Args:
        accelerator: the Accelerator driving the run (used for process ordering
            and to pick padding strategy per distributed/mixed-precision mode).
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train bert-base-cased on MRPC with gradient accumulation and report accuracy each epoch."""
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    """Parse CLI arguments and launch the training run."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
671
1
# Consistency checker for transformers-style `__init__.py` files: verifies that
# the `_import_structure` dict and the `if TYPE_CHECKING` section declare the
# same objects, and that every submodule is registered in the main init.
import collections
import os
import re
from pathlib import Path

# Root of the package tree to scan.
lowerCAmelCase : Union[str, Any] = """src/transformers"""
# Matches is_xxx_available()
lowerCAmelCase : Optional[int] = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowerCAmelCase : Any = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCAmelCase : int = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowerCAmelCase : Dict = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowerCAmelCase : int = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCAmelCase : List[str] = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCAmelCase : Dict = re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCAmelCase : List[str] = re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowerCAmelCase : Optional[int] = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowerCAmelCase : Dict = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowerCAmelCase : List[Any] = re.compile(R"""^\s*else:""")


def A_ ( _UpperCAmelCase ):
    """Return the normalized backend key ("x_and_y") for a backend-guard line, or None."""
    if _re_test_backend.search(_UpperCAmelCase ) is None:
        return None
    SCREAMING_SNAKE_CASE_: str = [b[0] for b in _re_backend.findall(_UpperCAmelCase )]
    backends.sort()
    return "_and_".join(_UpperCAmelCase )


def A_ ( _UpperCAmelCase ):
    """Parse an `__init__.py` and return (import_dict_objects, type_hint_objects).

    Each return value maps a backend key ("none" for unconditional objects) to the
    list of object names declared for that backend. Returns None for inits that
    do not use the lazy `_import_structure` pattern.
    """
    with open(_UpperCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
        SCREAMING_SNAKE_CASE_: List[str] = f.readlines()

    # Scan for the start of the lazy-import structure.
    SCREAMING_SNAKE_CASE_: Any = 0
    while line_index < len(_UpperCAmelCase ) and not lines[line_index].startswith("_import_structure = {" ):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(_UpperCAmelCase ):
        return None

    # First grab the objects without a specific backend in _import_structure
    SCREAMING_SNAKE_CASE_: Tuple = []
    while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
        SCREAMING_SNAKE_CASE_: List[str] = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(_UpperCAmelCase ):
            SCREAMING_SNAKE_CASE_: List[str] = _re_one_line_import_struct.search(_UpperCAmelCase ).groups()[0]
            SCREAMING_SNAKE_CASE_: List[Any] = re.findall(R"\[([^\]]+)\]" , _UpperCAmelCase )
            for imp in imports:
                # Strip the surrounding quotes of each object name.
                objects.extend([obj[1:-1] for obj in imp.split(", " )] )
            line_index += 1
            continue
        SCREAMING_SNAKE_CASE_: Dict = _re_import_struct_key_value.search(_UpperCAmelCase )
        if single_line_import_search is not None:
            SCREAMING_SNAKE_CASE_: Any = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(_UpperCAmelCase ) > 0]
            objects.extend(_UpperCAmelCase )
        elif line.startswith(" " * 8 + "\"" ):
            objects.append(line[9:-3] )
        line_index += 1

    SCREAMING_SNAKE_CASE_: str = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        SCREAMING_SNAKE_CASE_: Union[str, Any] = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            SCREAMING_SNAKE_CASE_: str = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            SCREAMING_SNAKE_CASE_: int = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
                SCREAMING_SNAKE_CASE_: Tuple = lines[line_index]
                if _re_import_struct_add_one.search(_UpperCAmelCase ) is not None:
                    objects.append(_re_import_struct_add_one.search(_UpperCAmelCase ).groups()[0] )
                elif _re_import_struct_add_many.search(_UpperCAmelCase ) is not None:
                    SCREAMING_SNAKE_CASE_: Optional[int] = _re_import_struct_add_many.search(_UpperCAmelCase ).groups()[0].split(", " )
                    SCREAMING_SNAKE_CASE_: str = [obj[1:-1] for obj in imports if len(_UpperCAmelCase ) > 0]
                    objects.extend(_UpperCAmelCase )
                elif _re_between_brackets.search(_UpperCAmelCase ) is not None:
                    SCREAMING_SNAKE_CASE_: Optional[int] = _re_between_brackets.search(_UpperCAmelCase ).groups()[0].split(", " )
                    SCREAMING_SNAKE_CASE_: Optional[Any] = [obj[1:-1] for obj in imports if len(_UpperCAmelCase ) > 0]
                    objects.extend(_UpperCAmelCase )
                elif _re_quote_object.search(_UpperCAmelCase ) is not None:
                    objects.append(_re_quote_object.search(_UpperCAmelCase ).groups()[0] )
                elif line.startswith(" " * 8 + "\"" ):
                    objects.append(line[9:-3] )
                elif line.startswith(" " * 12 + "\"" ):
                    objects.append(line[13:-3] )
                line_index += 1
            SCREAMING_SNAKE_CASE_: Dict = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    SCREAMING_SNAKE_CASE_: List[Any] = []
    while (
        line_index < len(_UpperCAmelCase )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("else" )
    ):
        SCREAMING_SNAKE_CASE_: Optional[int] = lines[line_index]
        SCREAMING_SNAKE_CASE_: Optional[Any] = _re_import.search(_UpperCAmelCase )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", " ) )
        elif line.startswith(" " * 8 ):
            objects.append(line[8:-2] )
        line_index += 1

    SCREAMING_SNAKE_CASE_: Any = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(_UpperCAmelCase ):
        # If the line is an if is_backend_available, we grab all objects associated.
        SCREAMING_SNAKE_CASE_: int = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            SCREAMING_SNAKE_CASE_: Optional[Any] = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            SCREAMING_SNAKE_CASE_: int = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
                SCREAMING_SNAKE_CASE_: str = lines[line_index]
                SCREAMING_SNAKE_CASE_: List[Any] = _re_import.search(_UpperCAmelCase )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            SCREAMING_SNAKE_CASE_: Optional[int] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects


def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
    """Compare the two halves of an init and return a list of human-readable errors."""

    def find_duplicates(_UpperCAmelCase ):
        # Object names declared more than once for the same backend.
        return [k for k, v in collections.Counter(_UpperCAmelCase ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    SCREAMING_SNAKE_CASE_: Dict = []
    for key in import_dict_objects.keys():
        SCREAMING_SNAKE_CASE_: List[str] = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}" )
        SCREAMING_SNAKE_CASE_: Optional[Any] = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
        # Compare as sets: ordering differences are not errors, missing names are.
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            SCREAMING_SNAKE_CASE_: Optional[Any] = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:" )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f" {a} in TYPE_HINT but not in _import_structure." )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f" {a} in _import_structure but not in TYPE_HINT." )
    return errors


def A_ ( ):
    """Walk the package tree and raise ValueError on any inconsistent `__init__.py`."""
    SCREAMING_SNAKE_CASE_: List[Any] = []
    for root, _, files in os.walk(_UpperCAmelCase ):
        if "__init__.py" in files:
            SCREAMING_SNAKE_CASE_: List[str] = os.path.join(_UpperCAmelCase , "__init__.py" )
            SCREAMING_SNAKE_CASE_: str = parse_init(_UpperCAmelCase )
            if objects is not None:
                SCREAMING_SNAKE_CASE_: Any = analyze_results(*_UpperCAmelCase )
                if len(_UpperCAmelCase ) > 0:
                    SCREAMING_SNAKE_CASE_: List[str] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(_UpperCAmelCase ) )
    if len(_UpperCAmelCase ) > 0:
        raise ValueError("\n\n".join(_UpperCAmelCase ) )


def A_ ( ):
    """Return the dotted names of all package submodules (skipping private folders)."""
    SCREAMING_SNAKE_CASE_: Dict = []
    for path, directories, files in os.walk(_UpperCAmelCase ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_" ):
                directories.remove(_UpperCAmelCase )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(_UpperCAmelCase ) / folder).glob("*.py" ) ) ) == 0:
                continue
            SCREAMING_SNAKE_CASE_: Tuple = str((Path(_UpperCAmelCase ) / folder).relative_to(_UpperCAmelCase ) )
            SCREAMING_SNAKE_CASE_: str = short_path.replace(os.path.sep , "." )
            submodules.append(_UpperCAmelCase )
        for fname in files:
            if fname == "__init__.py":
                continue
            SCREAMING_SNAKE_CASE_: Optional[int] = str((Path(_UpperCAmelCase ) / fname).relative_to(_UpperCAmelCase ) )
            SCREAMING_SNAKE_CASE_: Optional[int] = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
            # Only keep top-level modules (no dots after conversion).
            if len(submodule.split("." ) ) == 1:
                submodules.append(_UpperCAmelCase )
    return submodules


# Submodules intentionally absent from the main `_import_structure`.
lowerCAmelCase : str = [
    """convert_pytorch_checkpoint_to_tf2""",
    """modeling_flax_pytorch_utils""",
    """models.esm.openfold_utils""",
]


def A_ ( ):
    """Verify every discovered submodule is registered in the main transformers init."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    SCREAMING_SNAKE_CASE_: Optional[Any] = direct_transformers_import(_UpperCAmelCase )
    SCREAMING_SNAKE_CASE_: Optional[int] = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(_UpperCAmelCase , "__init__.py" ) , "r" ) as f:
        SCREAMING_SNAKE_CASE_: Union[str, Any] = f.read()
    import_structure_keys.update(set(re.findall(R"import_structure\[\"([^\"]*)\"\]" , _UpperCAmelCase ) ) )
    SCREAMING_SNAKE_CASE_: Optional[Any] = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(_UpperCAmelCase ) > 0:
        SCREAMING_SNAKE_CASE_: Tuple = "\n".join(f"- {module}" for module in module_not_registered )
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    # NOTE(review): `check_all_inits` / `check_submodules` are not defined in this
    # chunk (all functions above are named `A_`) — confirm names against full file.
    check_all_inits()
    check_submodules()
671
from math import asin, atan, cos, radians, sin, sqrt, tan

# WGS84 ellipsoid parameters (meters).
AXIS_A = 6378137.0        # semi-major axis
AXIS_B = 6356752.314245   # semi-minor axis
RADIUS = 6378137          # mean radius used for the final arc length


def A_ ( lat1 , lon1 , lat2 , lon2 ):
    """Return the great-circle (haversine) distance in meters between two points.

    Latitudes are first converted to *reduced* latitudes on the WGS84 ellipsoid
    (``atan((1 - f) * tan(phi))``) to partially correct for Earth's flattening,
    then the classic haversine formula is applied on radius ``RADIUS``.

    Bug fixed: the mangled original assigned every intermediate to the same
    placeholder name (NameError on ``flattening``/``phi_a``/...), and computed
    ``sin((phi_a - phi_a) / 2)`` which is identically zero.

    :param lat1: latitude of the first point, in degrees
    :param lon1: longitude of the first point, in degrees
    :param lat2: latitude of the second point, in degrees
    :param lon2: longitude of the second point, in degrees
    :return: distance in meters (float)
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Reduced latitudes on the ellipsoid, in radians.
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation: haversine terms sin^2(dphi/2) and sin^2(dlambda/2).
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
1
# Integration tests for the Flax Stable Diffusion pipelines (diffusers).
# The @slow suite downloads real checkpoints and asserts pixel-sum fingerprints
# on 8-device TPU/GPU hosts, so it cannot run in isolation.
import os
import tempfile
import unittest

import numpy as np

from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow

if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard

    from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline


@require_flax
class __lowercase ( unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): a second class below reuses the name `__lowercase`, so this
    # class is shadowed at module level — confirm against the original file.
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
        # Downloading a Flax pipeline must fetch only Flax weight files.
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowerCAmelCase__ , cache_dir=lowerCAmelCase__)
            SCREAMING_SNAKE_CASE_: Union[str, Any] = [t[-1] for t in os.walk(os.path.join(lowerCAmelCase__ , os.listdir(lowerCAmelCase__)[0] , "snapshots"))]
            SCREAMING_SNAKE_CASE_: Optional[int] = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class __lowercase ( unittest.TestCase ):
    """simple docstring"""

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
        # Tiny pipeline smoke test: 4 samples sharded across devices.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe" , safety_checker=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[Any] = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        SCREAMING_SNAKE_CASE_: List[str] = jax.random.PRNGKey(0)
        SCREAMING_SNAKE_CASE_: str = 4
        SCREAMING_SNAKE_CASE_: int = jax.device_count()
        SCREAMING_SNAKE_CASE_: List[str] = num_samples * [prompt]
        SCREAMING_SNAKE_CASE_: Union[str, Any] = pipeline.prepare_inputs(lowerCAmelCase__)

        # shard inputs and rng
        SCREAMING_SNAKE_CASE_: int = replicate(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Optional[Any] = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Optional[int] = shard(lowerCAmelCase__)

        SCREAMING_SNAKE_CASE_: Optional[int] = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            # Pixel-sum fingerprints; only valid on an 8-device host.
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.151_4745) < 1E-3
            assert np.abs(np.abs(lowerCAmelCase__ , dtype=np.floataa).sum() - 4_9947.875) < 5E-1

        SCREAMING_SNAKE_CASE_: str = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(lowerCAmelCase__) == num_samples

    def _SCREAMING_SNAKE_CASE ( self : Dict):
        # Full fp32 checkpoint (revision "flax"), 50 inference steps.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="flax" , safety_checker=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Dict = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        SCREAMING_SNAKE_CASE_: Union[str, Any] = jax.random.PRNGKey(0)
        SCREAMING_SNAKE_CASE_: Any = 50
        SCREAMING_SNAKE_CASE_: str = jax.device_count()
        SCREAMING_SNAKE_CASE_: List[Any] = num_samples * [prompt]
        SCREAMING_SNAKE_CASE_: Any = pipeline.prepare_inputs(lowerCAmelCase__)

        # shard inputs and rng
        SCREAMING_SNAKE_CASE_: List[Any] = replicate(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Optional[Any] = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Optional[int] = shard(lowerCAmelCase__)

        SCREAMING_SNAKE_CASE_: int = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0565_2401)) < 1E-3
            assert np.abs((np.abs(lowerCAmelCase__ , dtype=np.floataa).sum() - 238_3808.2)) < 5E-1

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
        # bfloat16 checkpoint, safety checker disabled.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Tuple = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        SCREAMING_SNAKE_CASE_: List[str] = jax.random.PRNGKey(0)
        SCREAMING_SNAKE_CASE_: List[Any] = 50
        SCREAMING_SNAKE_CASE_: Union[str, Any] = jax.device_count()
        SCREAMING_SNAKE_CASE_: str = num_samples * [prompt]
        SCREAMING_SNAKE_CASE_: str = pipeline.prepare_inputs(lowerCAmelCase__)

        # shard inputs and rng
        SCREAMING_SNAKE_CASE_: Dict = replicate(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Optional[int] = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Any = shard(lowerCAmelCase__)

        SCREAMING_SNAKE_CASE_: Any = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0400_3906)) < 1E-3
            assert np.abs((np.abs(lowerCAmelCase__ , dtype=np.floataa).sum() - 237_3516.75)) < 5E-1

    def _SCREAMING_SNAKE_CASE ( self : str):
        # bfloat16 checkpoint with the default safety checker enabled.
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa)
        SCREAMING_SNAKE_CASE_: List[str] = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        SCREAMING_SNAKE_CASE_: Union[str, Any] = jax.random.PRNGKey(0)
        SCREAMING_SNAKE_CASE_: Any = 50
        SCREAMING_SNAKE_CASE_: List[str] = jax.device_count()
        SCREAMING_SNAKE_CASE_: Dict = num_samples * [prompt]
        SCREAMING_SNAKE_CASE_: Union[str, Any] = pipeline.prepare_inputs(lowerCAmelCase__)

        # shard inputs and rng
        SCREAMING_SNAKE_CASE_: int = replicate(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Union[str, Any] = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Union[str, Any] = shard(lowerCAmelCase__)

        SCREAMING_SNAKE_CASE_: Union[str, Any] = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0400_3906)) < 1E-3
            assert np.abs((np.abs(lowerCAmelCase__ , dtype=np.floataa).sum() - 237_3516.75)) < 5E-1

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
        # Same checkpoint but with an explicit DDIM scheduler/state.
        SCREAMING_SNAKE_CASE_: Optional[int] = FlaxDDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , set_alpha_to_one=lowerCAmelCase__ , steps_offset=1 , )
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
        SCREAMING_SNAKE_CASE_: List[str] = scheduler.create_state()
        SCREAMING_SNAKE_CASE_: str = scheduler_state
        SCREAMING_SNAKE_CASE_: int = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        SCREAMING_SNAKE_CASE_: str = jax.random.PRNGKey(0)
        SCREAMING_SNAKE_CASE_: Any = 50
        SCREAMING_SNAKE_CASE_: List[str] = jax.device_count()
        SCREAMING_SNAKE_CASE_: List[str] = num_samples * [prompt]
        SCREAMING_SNAKE_CASE_: Optional[int] = pipeline.prepare_inputs(lowerCAmelCase__)

        # shard inputs and rng
        SCREAMING_SNAKE_CASE_: List[Any] = replicate(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: int = jax.random.split(lowerCAmelCase__ , lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Optional[int] = shard(lowerCAmelCase__)

        SCREAMING_SNAKE_CASE_: Union[str, Any] = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.0_4504_3945)) < 1E-3
            assert np.abs((np.abs(lowerCAmelCase__ , dtype=np.floataa).sum() - 234_7693.5)) < 5E-1

    def _SCREAMING_SNAKE_CASE ( self : List[Any]):
        # Memory-efficient attention must produce (nearly) the same pixels.
        SCREAMING_SNAKE_CASE_: List[str] = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        SCREAMING_SNAKE_CASE_: List[str] = jax.device_count()
        SCREAMING_SNAKE_CASE_: int = num_samples * [prompt]
        SCREAMING_SNAKE_CASE_: str = jax.random.split(jax.random.PRNGKey(0) , lowerCAmelCase__)

        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase__ , )
        SCREAMING_SNAKE_CASE_: List[Any] = replicate(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Optional[int] = pipeline.prepare_inputs(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[str] = shard(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Dict = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        SCREAMING_SNAKE_CASE_: List[Any] = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="bf16" , dtype=jnp.bfloataa , safety_checker=lowerCAmelCase__ , use_memory_efficient_attention=lowerCAmelCase__ , )
        SCREAMING_SNAKE_CASE_: Optional[int] = replicate(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[str] = pipeline.prepare_inputs(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[str] = shard(lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: int = pipeline(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , jit=lowerCAmelCase__).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        SCREAMING_SNAKE_CASE_: Tuple = images[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1E-2
671
# CLI script: convert a TensorFlow BERT checkpoint into a PyTorch
# `BertForPreTraining` state dict.
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging

logging.set_verbosity_info()


def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    """Build a BertForPreTraining from a JSON config, load TF weights into it,
    and save the resulting state dict to the dump path.

    Args (positional): tf_checkpoint_path, bert_config_file, pytorch_dump_path.
    """
    # Initialise PyTorch model
    SCREAMING_SNAKE_CASE_: List[Any] = BertConfig.from_json_file(_UpperCAmelCase )
    print(f"Building PyTorch model from configuration: {config}" )
    SCREAMING_SNAKE_CASE_: Tuple = BertForPreTraining(_UpperCAmelCase )

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , _UpperCAmelCase )


if __name__ == "__main__":
    lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" )
    parser.add_argument(
        """--bert_config_file""", default=None, type=str, required=True,
        help=( """The config json file corresponding to the pre-trained BERT model. \n"""
               """This specifies the model architecture.""" ), )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" )
    lowerCAmelCase : Optional[Any] = parser.parse_args()
    # NOTE(review): `convert_tf_checkpoint_to_pytorch` is not defined in this
    # chunk (the converter above is named `A_`) — confirm against the full file.
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
671
1
# CLI script: convert a T5X (JAX) checkpoint into a Flax `transformers`
# seq2seq model and save it with `save_pretrained`.
import argparse

# NOTE(review): "tax" is presumably a mangling of the `t5x` package — confirm.
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM


def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    """Copy every weight of a loaded T5X checkpoint into a freshly initialized
    Flax model, layer by layer, then save it to the dump folder.

    Args (positional): t5x_checkpoint_path, config_name, flax_dump_folder_path.
    """
    SCREAMING_SNAKE_CASE_: List[Any] = AutoConfig.from_pretrained(_UpperCAmelCase )
    SCREAMING_SNAKE_CASE_: List[Any] = FlaxAutoModelForSeqaSeqLM.from_config(config=_UpperCAmelCase )
    SCREAMING_SNAKE_CASE_: List[str] = checkpoints.load_tax_checkpoint(_UpperCAmelCase )
    # T5 v1.1 / LongT5 use a gated MLP with two input projections (wi_0/wi_1).
    SCREAMING_SNAKE_CASE_: Dict = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]

    if config.model_type == "t5":
        SCREAMING_SNAKE_CASE_: Tuple = "SelfAttention"
    # NOTE(review): because the next chain is a fresh `if`, a plain "t5" config
    # still falls into the `else: raise` below — looks like a bug; confirm
    # against the upstream conversion script.
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        SCREAMING_SNAKE_CASE_: Optional[Any] = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        SCREAMING_SNAKE_CASE_: Dict = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )

    # Encoder
    for layer_index in range(config.num_layers ):
        SCREAMING_SNAKE_CASE_: List[Any] = f"layers_{str(_UpperCAmelCase )}"

        # Self-Attention
        SCREAMING_SNAKE_CASE_: Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        SCREAMING_SNAKE_CASE_: Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        SCREAMING_SNAKE_CASE_: List[str] = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        SCREAMING_SNAKE_CASE_: Dict = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            SCREAMING_SNAKE_CASE_: str = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            SCREAMING_SNAKE_CASE_: Optional[Any] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            SCREAMING_SNAKE_CASE_: Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
        SCREAMING_SNAKE_CASE_: str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        SCREAMING_SNAKE_CASE_: int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        SCREAMING_SNAKE_CASE_: Tuple = flax_model.params["encoder"]["block"][str(_UpperCAmelCase )]["layer"]
        SCREAMING_SNAKE_CASE_: List[Any] = tax_attention_key
        SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_attention_out
        SCREAMING_SNAKE_CASE_: Any = tax_attention_query
        SCREAMING_SNAKE_CASE_: int = tax_attention_value
        SCREAMING_SNAKE_CASE_: Optional[Any] = tax_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_global_layer_norm

        if split_mlp_wi:
            SCREAMING_SNAKE_CASE_: Optional[int] = tax_mlp_wi_a
            SCREAMING_SNAKE_CASE_: Tuple = tax_mlp_wi_a
        else:
            SCREAMING_SNAKE_CASE_: Optional[Any] = tax_mlp_wi
        SCREAMING_SNAKE_CASE_: Tuple = tax_mlp_wo
        SCREAMING_SNAKE_CASE_: Optional[Any] = tax_mlp_layer_norm
        SCREAMING_SNAKE_CASE_: Optional[Any] = flax_model_encoder_layer_block

    # Only for layer 0:
    SCREAMING_SNAKE_CASE_: Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        SCREAMING_SNAKE_CASE_: List[str] = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_encoder_global_rel_embedding

    # Assigning
    SCREAMING_SNAKE_CASE_: Optional[Any] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    SCREAMING_SNAKE_CASE_: Tuple = tax_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers ):
        SCREAMING_SNAKE_CASE_: Optional[int] = f"layers_{str(_UpperCAmelCase )}"

        # Self-Attention
        SCREAMING_SNAKE_CASE_: List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        SCREAMING_SNAKE_CASE_: Dict = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        SCREAMING_SNAKE_CASE_: List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        SCREAMING_SNAKE_CASE_: Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        SCREAMING_SNAKE_CASE_: Any = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        SCREAMING_SNAKE_CASE_: Optional[int] = tax_enc_dec_attention_module["key"]["kernel"]
        SCREAMING_SNAKE_CASE_: Any = tax_enc_dec_attention_module["out"]["kernel"]
        SCREAMING_SNAKE_CASE_: Dict = tax_enc_dec_attention_module["query"]["kernel"]
        SCREAMING_SNAKE_CASE_: int = tax_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        SCREAMING_SNAKE_CASE_: str = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            SCREAMING_SNAKE_CASE_: int = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            SCREAMING_SNAKE_CASE_: List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            SCREAMING_SNAKE_CASE_: List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
        SCREAMING_SNAKE_CASE_: Tuple = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        SCREAMING_SNAKE_CASE_: Dict = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        SCREAMING_SNAKE_CASE_: Tuple = flax_model.params["decoder"]["block"][str(_UpperCAmelCase )]["layer"]
        SCREAMING_SNAKE_CASE_: Optional[int] = tax_attention_key
        SCREAMING_SNAKE_CASE_: int = tax_attention_out
        SCREAMING_SNAKE_CASE_: Optional[int] = tax_attention_query
        SCREAMING_SNAKE_CASE_: List[str] = tax_attention_value
        SCREAMING_SNAKE_CASE_: Any = tax_pre_attention_layer_norm
        SCREAMING_SNAKE_CASE_: int = tax_enc_dec_attention_key
        SCREAMING_SNAKE_CASE_: Dict = tax_enc_dec_attention_out
        SCREAMING_SNAKE_CASE_: Tuple = tax_enc_dec_attention_query
        SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_enc_dec_attention_value
        SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_cross_layer_norm

        if split_mlp_wi:
            SCREAMING_SNAKE_CASE_: Any = tax_mlp_wi_a
            SCREAMING_SNAKE_CASE_: Optional[int] = tax_mlp_wi_a
        else:
            SCREAMING_SNAKE_CASE_: Any = tax_mlp_wi
        SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_mlp_wo
        # NOTE(review): `txa_` vs `tax_` prefix is inconsistent here — presumably
        # a mangling artifact; confirm against the upstream script.
        SCREAMING_SNAKE_CASE_: Optional[Any] = txa_mlp_layer_norm
        SCREAMING_SNAKE_CASE_: Union[str, Any] = flax_model_decoder_layer_block

    # Decoder Normalization
    SCREAMING_SNAKE_CASE_: Any = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    SCREAMING_SNAKE_CASE_: str = txa_decoder_norm

    # Only for layer 0:
    SCREAMING_SNAKE_CASE_: Tuple = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    SCREAMING_SNAKE_CASE_: List[str] = tax_decoder_rel_embedding

    # Token Embeddings
    SCREAMING_SNAKE_CASE_: str = tax_model["target"]["token_embedder"]["embedding"]
    SCREAMING_SNAKE_CASE_: Tuple = txa_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        SCREAMING_SNAKE_CASE_: Union[str, Any] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(_UpperCAmelCase )
    print("T5X Model was sucessfully converted!" )


if __name__ == "__main__":
    lowerCAmelCase : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint.""" )
    parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
    parser.add_argument(
        """--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model.""" )
    lowerCAmelCase : Optional[int] = parser.parse_args()
    # NOTE(review): `convert_tax_checkpoint_to_flax` is not defined in this chunk
    # (the converter above is named `A_`) — confirm against the full file.
    convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
671
import math


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime.

    Uses the 6k +/- 1 optimisation: after handling 2, 3 and the trivial
    non-primes, only divisors of the form 6k - 1 / 6k + 1 up to
    sqrt(number) need testing.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Project Euler 58: return the side length of the square spiral at
    which the ratio of primes along both diagonals first drops below
    ``ratio``.

    The corners of the layer with side length j + 2 are
    j*j + k*(j + 1) for k = 1..3; the fourth corner (j + 2)**2 is an odd
    square and never prime, so it is skipped.

    Fixes over the original: the primality helper was unreachable (both
    functions were bound to the same mangled name) and the loop passed the
    *ratio* instead of the corner value to the primality test.
    """
    j = 3
    primes = 3  # 3, 5 and 7 lie on the diagonals of the initial 3x3 layer
    while primes / (2 * j - 1) >= ratio:
        # test the three non-square corners of the next layer
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


# Backward-compatible alias for the original (name-mangled) public name.
A_ = solution

if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
1
from __future__ import annotations from math import ceil, floor, sqrt def A_ ( _UpperCAmelCase = 2_00_00_00 ): SCREAMING_SNAKE_CASE_: list[int] = [0] SCREAMING_SNAKE_CASE_: int for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target SCREAMING_SNAKE_CASE_: int = 0 # the area corresponding to the grid that gives the product closest to target SCREAMING_SNAKE_CASE_: int = 0 # an estimate of b, using the quadratic formula SCREAMING_SNAKE_CASE_: float # the largest integer less than b_estimate SCREAMING_SNAKE_CASE_: int # the largest integer less than b_estimate SCREAMING_SNAKE_CASE_: int # the triangle number corresponding to b_floor SCREAMING_SNAKE_CASE_: int # the triangle number corresponding to b_ceil SCREAMING_SNAKE_CASE_: int for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): SCREAMING_SNAKE_CASE_: List[Any] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 SCREAMING_SNAKE_CASE_: Any = floor(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] = ceil(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Any = triangle_numbers[b_floor] SCREAMING_SNAKE_CASE_: List[Any] = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): SCREAMING_SNAKE_CASE_: int = triangle_b_first_guess * triangle_a SCREAMING_SNAKE_CASE_: int = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): SCREAMING_SNAKE_CASE_: Optional[Any] = triangle_b_second_guess * triangle_a SCREAMING_SNAKE_CASE_: Tuple = idx_a * b_ceil return area if __name__ == "__main__": print(f'''{solution() = }''')
671
import re


def split_input(str_: str) -> list:
    """Split ``str_`` on every non-alphanumeric, non-space character, then
    split each resulting chunk into words.

    Fixes over the original: the function body referenced ``str_`` while
    the mangled parameter had a different name (NameError).
    """
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    """Concatenate every word of ``str_`` with its first letter
    capitalised (PascalCase without separators)."""
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    """Join the words of ``text`` with ``separator``, fully upper- or
    lower-cased depending on ``upper``.

    Returns the literal string "not valid string" when the input cannot
    be split into words.
    """
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    """Convert ``text`` to PascalCase."""
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    """Convert ``text`` to camelCase (PascalCase with a lower-cased first
    letter); returns "not valid string" for inputs with no words."""
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    """Convert ``text`` to snake_case (or SNAKE_CASE when ``upper``)."""
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    """Convert ``text`` to kebab-case (or KEBAB-CASE when ``upper``)."""
    return to_complex_case(text, upper, "-")


# Backward-compatible alias: in the original (name-mangled) module every
# function was bound to ``A_``; the last definition was the survivor.
A_ = to_kebab_case

if __name__ == "__main__":
    __import__("doctest").testmod()
671
1
"""Emulation of the WWII Enigma machine: three rotors, a reflector and an
optional plugboard.  The machine is self-inverse: running the ciphertext
through the same settings recovers the (upper-cased) plaintext."""
from __future__ import annotations

RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
# fixed-point-free involution: guarantees no letter ever encrypts to itself
reflector = {
    "A": "N", "N": "A", "B": "O", "O": "B", "C": "P", "P": "C",
    "D": "Q", "Q": "D", "E": "R", "R": "E", "F": "S", "S": "F",
    "G": "T", "T": "G", "H": "U", "U": "H", "I": "V", "V": "I",
    "J": "W", "W": "J", "K": "X", "X": "K", "L": "Y", "Y": "L",
    "M": "Z", "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Validate rotor selection/positions and build the plugboard map.

    Raises:
        Exception: fewer than 3 unique rotors were supplied.
        ValueError: a rotor position is outside 1..26.
    """
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        # note: original message was missing its closing parenthesis
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    """Build a symmetric letter-swap dict from a plugboard string.

    Consecutive letters are paired: "ABCD" swaps A<->B and C<->D.

    Raises:
        TypeError: ``pbstring`` is not a string.
        Exception: odd length, a symbol outside A-Z, or a duplicate symbol.
    """
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)

    # fix: the original discarded the result of .replace(), so spaces were
    # never actually stripped; strip them before the length/symbol checks
    pbstring = pbstring.replace(" ", "")

    if len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encrypt/decrypt ``text`` with the given rotor positions (1..26),
    rotor selection and plugboard string.

    Characters outside A-Z pass through unchanged; letters are upper-cased
    before processing.  Applying ``enigma`` twice with identical settings
    returns the original upper-cased text.
    """
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    # internal rotor positions are 0-based
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors (inverse pass, reverse order)
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions (odometer-style stepping;
            # NOTE(review): the mangled original is ambiguous about whether
            # rotors 2/3 step every keypress or only on carry — this follows
            # the canonical odometer behaviour; either way the machine stays
            # self-inverse)
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
671
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


lowerCAmelCase = logging.get_logger(__name__)
logger = lowerCAmelCase


class UperNetConfig(PretrainedConfig):
    """Configuration for a UPerNet semantic-segmentation model: a backbone
    sub-configuration plus decode-head and auxiliary-head hyper-parameters.

    Fixes over the original: the mangling destroyed every ``self.x = ...``
    target, so no attribute was ever stored on the instance, and the
    ``model_type`` class attribute / ``to_dict`` method names were lost.
    """

    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,       # backbone config object or dict (defaults to ResNet)
        hidden_size=512,            # decode head channel size
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],   # PSP pooling scales (kept as in upstream API)
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,      # label value ignored by the loss
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # a plain dict: rebuild the proper config class from its model_type
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
671
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: maps each submodule to the public names it exports.
# Fix over the original: the mangling destroyed the ``_import_structure``
# binding (the dict and the torch-only list were assigned to a placeholder),
# so _LazyModule received an undefined name.
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only exposed when torch is installed.
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    """CPU-only regression test for pickling an accelerate-wrapped optimizer.

    Fixes over the original: the mangling destroyed every local binding, so
    ``model``/``optimizer``/``accelerator`` were never defined and
    ``accelerator.prepare`` was called on an undefined name.
    """

    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            # round-trip through pickle must not raise
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        # reset the global singleton so later tests start clean
        AcceleratorState._reset_state()
671
1
"""Vendored copy of py-filelock 3.0.12: platform-independent inter-process
file locking (fcntl on Unix, msvcrt on Windows, a soft lock otherwise).

Fix over the original: the mangling destroyed every assignment target
(``self._lock_file``, ``_logger``, ``FileLock``, ...), so no attribute or
module name was ever bound; all bindings are restored here.
"""
import logging
import os
import threading
import time

try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Return (and lazily create) the module logger."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired within *timeout* seconds."""

    def __init__(self, lock_file):
        #: path to the lock file that could not be acquired
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """Context-manager proxy returned by ``BaseFileLock.acquire`` so that
    ``with lock.acquire():`` releases on exit while a bare ``lock.acquire()``
    keeps the lock held."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Abstract base: reference-counted, thread-safe file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.  Only non-None while this object holds the lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # Nested-locking counter: acquire increments, release only really
        # releases when it drops back to 0.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, timeout):
        self._timeout = float(timeout)
        return None

    def _acquire(self):
        """Platform-specific acquire; must set ``self._lock_file_fd``."""
        raise NotImplementedError()

    def _release(self):
        """Platform-specific release; must clear ``self._lock_file_fd``."""
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """Try to acquire the lock, polling every ``poll_intervall`` seconds.

        Raises:
            Timeout: the lock could not be acquired within ``timeout``
                (a negative timeout waits forever).
        """
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...")
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """Decrement the counter; actually release when it hits 0 or ``force``."""
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        """Shorten over-long lock filenames by splicing in a hash."""
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """msvcrt-based lock (Windows only)."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        # \\?\ prefix lifts the MAX_PATH limit on Windows
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """fcntl.flock-based lock (POSIX)."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        # the real per-filesystem filename limit beats the 255 default
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Fallback lock based on exclusive file creation (O_EXCL)."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


# Pick the best available implementation for this platform.
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
671
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: smallest row length n for which the number of
    ways to fill a row with red blocks of length >= ``min_block_length``
    (any two blocks separated by at least one black square) first exceeds
    one million.

    Fix over the original: the ``fill_count_functions`` table was never
    bound (its assignment target was mangled away), so the function raised
    NameError on first use.
    """
    # fill_count_functions[n] = number of ways to fill a row of length n;
    # rows shorter than min_block_length admit only the empty filling.
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            # the block may also sit flush against the right edge of the row
            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break
    return n


# Backward-compatible alias for the original (name-mangled) public name.
A_ = solution

if __name__ == "__main__":
    print(f"{solution() = }")
671
1
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    """Runs the doctests embedded in transformers source / docs files.

    Fixes over the original: the mangling destroyed every local binding
    (``files``, ``module_identifier``, ``result``, ...) and turned the
    list-type check into ``isinstance(x, x)``.
    """

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Doctest every matching file in ``directory``.

        Args:
            directory: directory to scan (non-recursively).
            identifier: keep only filenames containing this substring.
            ignore_files: filenames to skip (``__init__.py`` always skipped).
            n_identifier: substring(s); files containing any are excluded.
            only_modules: if True, resolve each file as a transformers
                attribute and run its DocTestSuite; otherwise run
                ``doctest.testfile`` on the file itself.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_files(self):
        module_dir = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(module_dir, identifier=files, ignore_files=ignore_files)

    def test_tokenization_files(self):
        module_dir = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(module_dir, identifier=files)

    def test_configuration_files(self):
        module_dir = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(module_dir, identifier=files)

    def test_files(self):
        module_dir = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(module_dir, n_identifier=n_identifiers)

    def test_doc_files(self):
        doc_source_dir = Path("docs/source")
        ignore_files = ["favicon.ico"]
        # doc files are plain text, not importable modules
        # NOTE(review): only_modules=False restored from upstream; the
        # mangled original made the flag unreadable — confirm against repo.
        self.analyze_directory(doc_source_dir, ignore_files=ignore_files, only_modules=False)
671
def remove_digit(num: int) -> int:
    """Return the largest number obtainable by deleting exactly one digit
    from the decimal representation of ``abs(num)``.

    >>> remove_digit(152)
    52
    >>> remove_digit(6385)
    685

    Raises:
        TypeError: if ``num`` is not an integer.

    Fixes over the original: the mangled code called
    ``isinstance(num, num)`` (always a TypeError for ints), built the
    digit lists from the raw int, and popped/joined the wrong names.
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    # one copy of the digit list per deletable position
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(
        int("".join(transposition)) for transposition in num_transpositions
    )


# Backward-compatible alias for the original (name-mangled) public name.
A_ = remove_digit

if __name__ == "__main__":
    __import__("doctest").testmod()
671
1
from datetime import datetime as dt
import os

from github import Github


# Issues carrying any of these labels are never auto-closed or stale-marked.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    """Close or stale-comment inactive issues on huggingface/transformers.

    - Closes issues whose most recent comment is the bot's stale notice and
      that have seen > 7 further days of inactivity (and are >= 30 days old).
    - Otherwise adds the stale comment to issues inactive > 23 days (and
      >= 30 days old), unless exempted by label.

    Fixes over the original: the entry point was unreachable
    (``if __name__`` called ``main`` but the def was mangled to ``A_``),
    the sort lambda referenced an undefined ``i``, and every local binding
    (``g``, ``repo``, ``comments``, ...) was destroyed.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
671
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    """A single element of the circular list: a payload plus a next link."""

    def __init__(self, data: Any):
        self.data = data
        # Filled in on insertion; in a maintained list this is never None.
        self.next: Node | None = None


class CircularLinkedList:
    """Singly linked list whose tail's ``next`` points back to the head."""

    def __init__(self) -> None:
        # Both None for an empty list; otherwise tail.next is head.
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        """Yield each node's data exactly once, starting at the head."""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:  # completed one full loop
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        """Append ``data`` after the current tail."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend ``data`` before the current head."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` at position ``index`` (0 <= index <= len).

        Raises:
            IndexError: if ``index`` is out of range.
        """
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # inserted at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        """Remove and return the head's data."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        """Remove and return the tail's data."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the data at position ``index``.

        Raises:
            IndexError: if ``index`` is out of range (including empty list).
        """
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.head.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # deleted the tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    """Exercise every list operation end to end.

    >>> test_circular_linked_list()
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
1
from collections.abc import Callable

import numpy as np


def A_ (
    ode_func: Callable,
    ya: float,
    xa: float,
    step_size: float,
    x_end: float,
) -> np.ndarray:
    """Solve y' = ode_func(x, y), y(xa) = ya, with Heun's method.

    Heun's (improved Euler) method: an explicit-Euler predictor followed by a
    trapezoidal corrector, giving second-order accuracy.

    Args:
        ode_func: right-hand side f(x, y) of the ODE.
        ya: initial value y(xa).
        xa: initial abscissa.
        step_size: fixed step h (> 0).
        x_end: integrate until x reaches (at least) this value.

    Returns:
        np.ndarray: y-values at xa, xa+h, ... (n+1 entries).
    """
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa

    for k in range(n):
        # Predictor: one explicit-Euler step.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slopes at both ends of the interval.
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_predict)
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """Project Euler 174: hollow square laminae.

    Count the tile totals t <= ``t_limit`` that can be arranged into at least
    one and at most ``n_limit`` distinct hollow square laminae.

    Args:
        t_limit: maximum number of tiles considered.
        n_limit: maximum multiplicity L(t) counted.

    Returns:
        int: number of t values with 1 <= L(t) <= n_limit.
    """
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # Smallest hole keeping outer^2 - hole^2 <= t_limit.
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # Outer and hole widths must share parity for a symmetric lamina.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    # Use the n_limit parameter (the original hard-coded 10 here).
    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
671
1
def A_ ( sentence: str, ngram_size: int ) -> list:
    """Return every contiguous character n-gram of ``sentence``.

    Args:
        sentence: input text.
        ngram_size: window length; windows longer than the text yield [].

    Returns:
        list: all slices of length ``ngram_size``, in order.

    >>> A_("abc", 2)
    ['ab', 'bc']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
671
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Submodule -> public names, consumed by _LazyModule below.  Configuration and
# tokenizer are always importable; framework models are added conditionally.
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy framework
    # submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
1
"""Testing suite for the TensorFlow Cvt model (reconstructed from obfuscated residue)."""


from __future__ import annotations

import inspect
import unittest
from math import floor

import numpy as np

from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFCvtForImageClassification, TFCvtModel
    from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFCvtConfigTester(ConfigTester):
    """Config tester that also checks Cvt's list-valued size fields exist."""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))


class TFCvtModelTester:
    """Builds tiny Cvt configs and random inputs for the unit tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        # Mutable list defaults kept to mirror upstream; they are read-only here.
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        # NOTE(review): the obfuscated original lost every attribute target
        # here; restored per the upstream test file.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # Each stage shrinks the spatial dims by its conv-embedding stride.
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model-test harness for TFCvtModel / TFCvtForImageClassification."""

    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )
    # Cvt does not support these common-test features.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the fixed COCO sample image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
671
# Demo adjacency list used by the __main__ examples below.
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Return one shortest path from ``start`` to ``goal``.

    Args:
        graph: adjacency-list mapping node -> list of neighbours.
        start: source node.
        goal: destination node.

    Returns:
        list[str]: the node sequence, or [] if ``goal`` is unreachable.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # list.pop(0) is O(n); acceptable for the small demo graphs here.
        path = queue.pop(0)
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the edge count of a shortest ``start`` -> ``target`` path.

    Returns -1 when either endpoint is missing or unreachable; 0 when
    start == target.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0

    queue = [start]
    # {start}, not set(start): set("AB") would split a multi-char node name.
    visited = {start}
    # Keep tab on distances from `start` node; target pre-seeded as -1.
    dist = {start: 0, target: -1}

    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1

    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
671
1
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    """Options controlling how remote files are downloaded and cached.

    NOTE(review): field names reconstructed from the upstream config this
    residue derives from — the obfuscated original collapsed all sixteen
    fields onto one name.
    """

    # Where downloaded files are cached (None = library default).
    cache_dir: Optional[Union[str, Path]] = None
    # Re-download even when a cached copy exists.
    force_download: bool = False
    # Resume a previously interrupted download.
    resume_download: bool = False
    # Never hit the network; only serve from the cache.
    local_files_only: bool = False
    # Optional proxy mapping passed to the HTTP layer.
    proxies: Optional[Dict] = None
    # Optional User-Agent override.
    user_agent: Optional[str] = None
    # Extract compressed archives after download.
    extract_compressed_file: bool = False
    # Re-extract even if an extracted copy exists.
    force_extract: bool = False
    # Remove the archive once extracted.
    delete_extracted: bool = False
    # Use ETags for cache validation.
    use_etag: bool = True
    # Parallel download processes (None = sequential).
    num_proc: Optional[int] = None
    # HTTP retry budget.
    max_retries: int = 1
    # Token (or True to read the stored token) for authenticated downloads.
    use_auth_token: Optional[Union[str, bool]] = None
    # Strip query parameters when deriving cache filenames.
    ignore_url_params: bool = False
    # Extra options forwarded to the filesystem backend.
    storage_options: Optional[Dict] = None
    # Progress-bar description override.
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        """Return an independent deep copy of this configuration."""
        # Deep-copy each VALUE (the original deep-copied an undefined name).
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
671
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    """Structural type for single-sample audio filters."""

    def process(self, sample: float) -> float:
        """Calculate y[n] for input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    """Return (lowest, highest) dB bounds over the positive-frequency bins,
    clamped to at least the [-20, 20] window."""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the magnitude response (dB) of ``filter_type`` via its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    # np.log10 — the original's "np.logaa" is not a NumPy function.
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the (unwrapped) phase response of ``filter_type``."""
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
671
1
from math import gcd


def decimal_to_fraction(decimal):
    """Return ``decimal`` as a (numerator, denominator) pair in lowest terms.

    Accepts ints, floats and numeric strings.

    Args:
        decimal: the value to convert.

    Returns:
        tuple[int, int]: reduced numerator and denominator.

    Raises:
        ValueError: if ``decimal`` cannot be parsed as a number.
    """
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number") from None

    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1

    # Scale by a power of ten so the value becomes an integer numerator.
    number_of_frac_digits = len(str(decimal).split(".")[1])
    numerator = int(decimal * (10**number_of_frac_digits))
    denominator = 10**number_of_frac_digits

    # math.gcd replaces the hand-rolled Euclid loop; integer floor-division
    # avoids the float round-trip of int(a / b) and keeps the denominator
    # positive for negative inputs (the original could return it negated).
    divisor = gcd(numerator, denominator)
    return numerator // divisor, denominator // divisor


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")
671
from __future__ import annotations from math import ceil, floor, sqrt def A_ ( _UpperCAmelCase = 2_00_00_00 ): SCREAMING_SNAKE_CASE_: list[int] = [0] SCREAMING_SNAKE_CASE_: int for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ): triangle_numbers.append(triangle_numbers[-1] + idx ) # we want this to be as close as possible to target SCREAMING_SNAKE_CASE_: int = 0 # the area corresponding to the grid that gives the product closest to target SCREAMING_SNAKE_CASE_: int = 0 # an estimate of b, using the quadratic formula SCREAMING_SNAKE_CASE_: float # the largest integer less than b_estimate SCREAMING_SNAKE_CASE_: int # the largest integer less than b_estimate SCREAMING_SNAKE_CASE_: int # the triangle number corresponding to b_floor SCREAMING_SNAKE_CASE_: int # the triangle number corresponding to b_ceil SCREAMING_SNAKE_CASE_: int for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ): SCREAMING_SNAKE_CASE_: List[Any] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2 SCREAMING_SNAKE_CASE_: Any = floor(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] = ceil(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_: Any = triangle_numbers[b_floor] SCREAMING_SNAKE_CASE_: List[Any] = triangle_numbers[b_ceil] if abs(target - triangle_b_first_guess * triangle_a ) < abs( target - best_product ): SCREAMING_SNAKE_CASE_: int = triangle_b_first_guess * triangle_a SCREAMING_SNAKE_CASE_: int = idx_a * b_floor if abs(target - triangle_b_second_guess * triangle_a ) < abs( target - best_product ): SCREAMING_SNAKE_CASE_: Optional[Any] = triangle_b_second_guess * triangle_a SCREAMING_SNAKE_CASE_: Tuple = idx_a * b_ceil return area if __name__ == "__main__": print(f'''{solution() = }''')
671
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Submodule -> public names, consumed by _LazyModule below.
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so torch is only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule -> public names, consumed by _LazyModule below.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy framework
    # submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
671
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    """Agent tool producing a binary segmentation mask of `label` inside `image`."""

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        # CLIPSeg pairs the text prompt with the image.
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # Threshold logits at zero into a binary {0, 1} mask.
        # NOTE(review): assignment targets were lost in the obfuscated
        # original; reconstructed per the upstream implementation.
        array[array <= 0] = 0
        array[array > 0] = 1
        # np.uint8 — the original's "np.uinta" is not a real dtype.
        return Image.fromarray((array * 255).astype(np.uint8))
671
"""Convert a HF Diffusers saved pipeline to a Stable Diffusion checkpoint.

Reconstructed from an obfuscated copy in which every assignment target was
replaced by a shared placeholder; variable and function names below are
recovered from their surviving call sites. Concrete fixes: the regex
replacement lambdas referenced an undefined name ``m`` (NameError on first
match) and the qkv capture / ``torch.cat`` stores had lost their targets.
"""
import argparse
import os.path as osp
import re

import torch
from safetensors.torch import load_file, save_file


# =================#
# UNet Conversion #
# =================#

unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def convert_unet_state_dict(unet_state_dict):
    """Map HF Diffusers UNet keys onto original Stable Diffusion keys."""
    # buyer beware: this is a *brittle* function, and correct output requires
    # that all of these pieces interact in the exact order in which I have
    # arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]


def reshape_weight_for_sd(w):
    """Convert HF linear weights to SD conv2d weights by adding two unit dims."""
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    """Map HF Diffusers VAE keys/weights onto original Stable Diffusion layout."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}


def convert_text_enc_state_dict_vaa(text_enc_dict):
    """Convert an OpenCLIP (SD v2.x) text-encoder state dict to the SD layout.

    Collects the separate q/k/v projections into fused ``in_proj_weight`` /
    ``in_proj_bias`` tensors, then relabels the remaining keys.
    Fix vs the obfuscated copy: the ``re.sub`` replacement lambdas now bind
    their match object (``lambda m: ...``) instead of referencing an
    undefined ``m``.
    """
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]  # single char: 'q', 'k' or 'v'
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    """SD v1.x text encoders (CLIP) already use the expected layout."""
    return text_enc_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
671
1
"""Tests for the Nyströmformer model.

Reconstructed from an obfuscated copy: every method was named
``_SCREAMING_SNAKE_CASE`` (so later defs clobbered earlier ones), every
parameter was named ``lowerCAmelCase__`` (duplicate parameter names are a
SyntaxError), and the tester class was renamed away from
``NystromformerModelTester`` even though ``setUp`` still instantiates it by
that name. Names below are recovered from the surviving call sites.
"""
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class NystromformerModelTester:
    """Builds tiny configs/inputs and runs shape checks for each model head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, ids, token_type_ids, mask, seq/token/choice labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # NOTE(review): the obfuscated copy had an unreadable value for
        # `is_decoder`; False assumed — confirm against upstream.
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the obfuscated copy only shows two anonymous `= False`
    # attributes here; `test_pruning`/`test_headmasking` assumed — confirm.
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # NOTE(review): the assignment target was lost in the obfuscated
            # copy; setting `position_embedding_type` on the config assumed.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
671
"""XLM-ProphetNet model configuration.

Reconstructed from an obfuscated copy. Concrete defect fixed: the
`num_hidden_layers` property getter had been renamed, so the
``@num_hidden_layers.setter`` decorator referenced an undefined name and the
class could not even be created; class attributes had likewise been renamed
away from the names ``PretrainedConfig`` reads (``model_type`` etc.).
"""
from typing import Callable, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}


class XLMProphetNetConfig(PretrainedConfig):
    """Configuration class for XLM-ProphetNet encoder-decoder models."""

    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        """Total layer count: encoder layers plus decoder layers."""
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        # Kept intentionally unsupported: callers must set the encoder/decoder
        # layer counts individually.
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
671
1
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    WhisperForConditionalGeneration,
    WhisperProcessor,
)

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    """Pipeline that transcribes speech with Whisper, then renders the
    transcription with Stable Diffusion.

    The audio is first turned into a text prompt via
    ``speech_model``/``speech_processor``; the rest of ``__call__`` is a
    standard Stable Diffusion text-to-image loop.
    """

    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        # NOTE(review): the safety checker is warned about above but not
        # registered as a module here — preserved as-is; confirm upstream.
        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Trade speed for memory by computing attention in slices."""
        if slice_size == "auto":
            # Halving the head count is a reasonable default slice.
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Restore full (un-sliced) attention computation."""
        # Passing `None` disables slicing in `set_attention_slice`.
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Transcribe `audio` and generate image(s) for the transcription.

        Returns a `StableDiffusionPipelineOutput` (or a tuple when
        `return_dict=False`); `nsfw_content_detected` is always `None`
        because no safety checker is run.
        """
        # --- speech -> text prompt ---
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # --- text prompt -> CLIP embeddings ---
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        # 0.18215 is the VAE scaling factor used by Stable Diffusion.
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
671
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    """Pairwise squared Euclidean distances between rows of `a` and rows of `b`.

    Uses the expansion |a - b|^2 = |a|^2 - 2ab + |b|^2 so everything is a
    single matmul plus broadcasts. Returns an (len(a), len(b)) matrix.
    """
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    """Map every RGB pixel of `x` to the index of its nearest cluster color."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class ImageGPTImageProcessor(BaseImageProcessor):
    """Image processor for ImageGPT: resize, normalize to [-1, 1], then
    color-quantize pixels against a fixed palette of `clusters`, producing a
    flat sequence of cluster indices (`input_ids`) per image.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        # clusters is a first argument as it is required by the original implementation's checkpoints
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size` ({"height": h, "width": w})."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Rescale pixel values from [0, 255] into the symmetric range [-1, 1]."""
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess a batch of images into model inputs.

        When `do_color_quantize` is on, returns `input_ids` of shape
        (batch, height*width); otherwise returns channel-formatted pixel
        arrays under the same key.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
671
1
def one_pence() -> int:
    """There is exactly one way to pay any non-negative amount with 1p coins."""
    return 1


def two_pence(x: int) -> int:
    """Ways to make `x` pence using coins of value at most 2p."""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    """Ways to make `x` pence using coins of value at most 5p."""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    """Ways to make `x` pence using coins of value at most 10p."""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    """Ways to make `x` pence using coins of value at most 20p."""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    """Ways to make `x` pence using coins of value at most 50p."""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    """Ways to make `x` pence using coins of value at most 100p (one pound)."""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    """Ways to make `x` pence using all coins up to 200p (two pounds)."""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    """Project Euler 31: count the ways to make `x` pence from UK coins.

    Each `<coin>`-named helper counts combinations that use only coins up to
    that denomination; chaining them enumerates every combination exactly once.
    """
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
671
import collections
from typing import List, Optional, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}


class DPRContextEncoderTokenizer(BertTokenizer):
    """Tokenizer for the DPR context encoder — plain BERT tokenization with
    DPR checkpoint metadata."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    """Tokenizer for the DPR question encoder — plain BERT tokenization with
    DPR checkpoint metadata."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to
    `.decode_best_spans`. It converts the strings of a question and different passages (title and text) in a sequence
    of IDs (integers), using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size
    `(n_passages, sequence_length)` with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
            required by one of the truncation/padding parameters. If the model has no specific maximum input length
            (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """Mixin that adds DPR-reader-specific encoding and span decoding on top
    of a BERT-style tokenizer."""

    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            # Only one of titles/texts given: fall back to a plain pair encoding.
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # A single question is broadcast across all passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """Pick the best answer spans across all passages.

        Passages are visited in decreasing relevance order; up to
        `num_spans_per_passage` spans are taken from each, and at most
        `num_spans` predictions are returned overall.
        """
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """Greedily select the `top_spans` highest-scoring, non-overlapping
        spans of length at most `max_answer_length` within one passage."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            # Skip any span that overlaps (contains or is contained by) a chosen one.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    """Tokenizer for the DPR reader model: BERT tokenization plus the
    question/title/text packing and span decoding from the mixin."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
671
1
def decimal_to_binary(num: int) -> str:
    """
    Convert an integer to its binary representation, mimicking `bin()`.

    Raises `TypeError` for floats and strings, matching the messages the
    builtin would produce.

    >>> decimal_to_binary(0)
    '0b0'
    >>> decimal_to_binary(40)
    '0b101000'
    >>> decimal_to_binary(-40)
    '-0b101000'
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        # Collect bits least-significant first; reverse when joining.
        binary.append(num % 2)
        num >>= 1

    prefix = "-0b" if negative else "0b"
    return prefix + "".join(str(bit) for bit in reversed(binary))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
671
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    """DistilBERT tokenizer tests; inherits the full BERT tokenization suite
    and only adds a special-tokens round-trip check."""

    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        """Single sentences get [CLS] ... [SEP]; pairs get [CLS] a [SEP] b [SEP]."""
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
671
1