Dataset schema:
    code                     string   (length 81 – 54k)
    code_codestyle           int64    (0 – 721)
    style_context            string   (length 91 – 41.9k)
    style_context_codestyle  int64    (0 – 699)
    label                    int64    (0 – 1)
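Each row below stores a `code` string, its `code_codestyle` score, a `style_context` string, its `style_context_codestyle` score, and a binary `label`; the bare numbers between the code samples are those per-row values. As a minimal sketch of how a dataset with this schema could be inspected with 🤗 Datasets — the repository path `user/code-style-pairs` is a hypothetical placeholder, not the actual dataset name:

# Minimal sketch, assuming the schema above; "user/code-style-pairs" is a
# hypothetical placeholder path, not the real repository name.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
print(ds.features)  # column names and dtypes
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code sample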
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file  # noqa: F401
from .default import default_command_parser
from .update import update_command_parser


def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
45
import math


def res(x: int, y: int) -> float:
    """Return log10(x**y) so that very large powers can be compared without overflow."""
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
45
1
from statistics import mean

import numpy as np


def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    """Calculate the turnaround time of each process under HRRN scheduling."""
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Marks each finished process:
    # 0 means the process is not finished yet, 1 means it is finished.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Jump to the first process that is not finished yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turnaround time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    """Waiting time is turnaround time minus burst time for each process."""
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )

    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f"{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"
            f"{turn_around_time[i]}\t\t\t{waiting_time[i]}"
        )

    print(f"average waiting time : {mean(waiting_time):.5f}")
    print(f"average turn around time : {mean(turn_around_time):.5f}")
45
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        # A cell can be visited if it is inside the grid, unvisited, and land.
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 cells surrounding cell (i, j)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
45
1
from __future__ import annotations

ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    # Exactly one of the three values must be 0 (the unknown to solve for).
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
45
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to another base (2–36) as str."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for base in range(2, 37):
        for num in range(1_000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
45
1
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count set bits by checking the parity of each successively shifted value."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark code for comparing the two functions, with different int values."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
45
import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE : def __init__( self : List[Any] , a : Any , a : Optional[int]=13 , a : Tuple=30 , a : Union[str, Any]=2 , a : List[str]=3 , a : Dict=True , a : List[str]=True , a : List[Any]=32 , a : List[str]=5 , a : Optional[int]=4 , a : List[str]=37 , a : Dict="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : int=10 , a : List[str]=0.02 , a : int=None , a : List[str]=2 , )-> Dict: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = scope lowercase__ = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase__ = (image_size // patch_size) ** 2 lowercase__ = num_patches + 1 def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any: """simple docstring""" lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]: """simple docstring""" return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[Any] , a : List[str] , a : Dict )-> Optional[Any]: """simple docstring""" lowercase__ = ViTModel(config=a ) model.to(a ) model.eval() lowercase__ = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[Any] , a : int , a : Tuple )-> Union[str, Any]: """simple docstring""" lowercase__ = ViTForMaskedImageModeling(config=a ) model.to(a ) model.eval() lowercase__ = model(a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale 
images lowercase__ = 1 lowercase__ = ViTForMaskedImageModeling(a ) model.to(a ) model.eval() lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ = model(a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[str] , a : int , a : List[Any] )-> str: """simple docstring""" lowercase__ = self.type_sequence_label_size lowercase__ = ViTForImageClassification(a ) model.to(a ) model.eval() lowercase__ = model(a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase__ = 1 lowercase__ = ViTForImageClassification(a ) model.to(a ) model.eval() lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) = config_and_inputs lowercase__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : Any = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) _UpperCamelCase : Union[str, Any] = ( {'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification} if is_torch_available() else {} ) _UpperCamelCase : int = True _UpperCamelCase : int = False _UpperCamelCase : Union[str, Any] = False _UpperCamelCase : Dict = False def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]: """simple docstring""" lowercase__ = ViTModelTester(self ) lowercase__ = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(a ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_image_modeling(*a ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a ) @slow def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]: """simple docstring""" for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = ViTModel.from_pretrained(a ) self.assertIsNotNone(a ) def __UpperCamelCase () -> str: lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE (unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]: """simple docstring""" return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]: """simple docstring""" lowercase__ = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(a ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ = model(**a ) # verify the logits lowercase__ = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , a ) lowercase__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) ) @slow def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]: """simple docstring""" lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' ).to(a ) lowercase__ = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 ) lowercase__ = prepare_img() lowercase__ = image_processor(images=a , return_tensors='pt' ) lowercase__ = inputs.pixel_values.to(a ) # forward pass with torch.no_grad(): lowercase__ = model(a , interpolate_pos_encoding=a ) # verify the logits lowercase__ = torch.Size((1, 3_601, 384) ) self.assertEqual(outputs.last_hidden_state.shape , a ) lowercase__ = torch.tensor( [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(a ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self : str )-> str: """simple docstring""" lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=a , return_tensors='pt' ) lowercase__ = inputs.pixel_values.to(a ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowercase__ = model(a )
45
1
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
45
def stooge_sort(arr: list) -> list:
    """Sort arr in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
45
1
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """
    Counts the triangular words in the words file: a word is triangular if the
    sum of its letter values (A=1, ..., Z=26) is a triangular number.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    word_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(word_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
45
from scipy.stats import spearmanr import datasets lowercase_ = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ lowercase_ = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ lowercase_ = R"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE (datasets.Metric ): def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def SCREAMING_SNAKE_CASE_ ( self : int , a : str , a : Any , a : str=False )-> Optional[int]: """simple docstring""" lowercase__ = spearmanr(a , a ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
45
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        raise OptionalDependencyNotAvailable()

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
45
def solution(length: int = 50) -> int:
    """
    Counts the ways a row of the given length can be filled with blocks of
    length at least 3, separated by at least one empty cell (cf. Project Euler 114).
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
45
1
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowercase_ = logging.get_logger(__name__) lowercase_ = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } lowercase_ = { """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""}, """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""}, """tokenizer_config_file""": { """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json""" }, } lowercase_ = {"""facebook/blenderbot-3B""": 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __UpperCamelCase () -> Any: lowercase__ = ( list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) lowercase__ = bs[:] lowercase__ = 0 for b in range(2**8 ): if b not in bs: bs.append(_SCREAMING_SNAKE_CASE ) cs.append(2**8 + n ) n += 1 lowercase__ = [chr(_SCREAMING_SNAKE_CASE ) for n in cs] return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]: lowercase__ = set() lowercase__ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase__ = char return pairs class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES _UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Union[str, Any] = ['input_ids', 'attention_mask'] def __init__( self : Union[str, Any] , a : Optional[Any] , a : Optional[Any] , a : str="replace" , a : Tuple="<s>" , a : int="</s>" , a : Tuple="</s>" , a : Optional[Any]="<s>" , a : Dict="<unk>" , a : List[Any]="<pad>" , a : Optional[Any]="<mask>" , a : Tuple=False , **a : Dict , )-> Any: """simple docstring""" lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token super().__init__( errors=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , add_prefix_space=a , **a , ) with open(a , encoding='utf-8' ) as vocab_handle: lowercase__ = json.load(a ) lowercase__ = {v: k for k, v in self.encoder.items()} lowercase__ = errors # how to handle errors in decoding lowercase__ = bytes_to_unicode() lowercase__ = {v: k for k, v in self.byte_encoder.items()} with open(a , encoding='utf-8' ) as merges_handle: lowercase__ = merges_handle.read().split('\n' )[1:-1] lowercase__ = [tuple(merge.split() ) for merge in bpe_merges] lowercase__ = dict(zip(a , range(len(a ) ) ) ) lowercase__ = {} lowercase__ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowercase__ = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]: """simple docstring""" return len(self.encoder ) def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Union[str, Any]: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[str] )-> Any: """simple docstring""" if token in self.cache: return self.cache[token] lowercase__ = tuple(a ) lowercase__ = get_pairs(a ) if not pairs: return token while True: lowercase__ = min(a , key=lambda a : self.bpe_ranks.get(a , float('inf' ) ) ) if bigram not in self.bpe_ranks: break lowercase__ , lowercase__ = bigram lowercase__ = [] lowercase__ = 0 while i < len(a ): try: lowercase__ = word.index(a , a ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowercase__ = j if word[i] == first and i < len(a ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowercase__ = tuple(a ) lowercase__ = new_word if len(a ) == 1: break else: lowercase__ = get_pairs(a ) lowercase__ = ' '.join(a ) lowercase__ = word return word def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : Any )-> Tuple: """simple docstring""" lowercase__ = [] for token in re.findall(self.pat , a ): lowercase__ = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a ).split(' ' ) ) return bpe_tokens def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[int] )-> List[str]: """simple docstring""" return self.encoder.get(a , self.encoder.get(self.unk_token ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : int )-> int: """simple docstring""" return self.decoder.get(a ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : Any )-> List[str]: """simple docstring""" lowercase__ = ''.join(a ) lowercase__ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : str , a : Optional[str] = None )-> Tuple[str]: """simple docstring""" if not os.path.isdir(a ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ = os.path.join( a , (filename_prefix + '-' if 
filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowercase__ = os.path.join( a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(a , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=a , ensure_ascii=a ) + '\n' ) lowercase__ = 0 with open(a , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!' ) lowercase__ = token_index writer.write(' '.join(a ) + '\n' ) index += 1 return vocab_file, merge_file def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : List[int] , a : Optional[List[int]] = None , a : bool = False )-> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a , token_ids_a=a , already_has_special_tokens=a ) if token_ids_a is None: return [1] + ([0] * len(a )) + [1] return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) + [1] def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[str] , a : List[Any]=False , **a : List[Any] )-> Union[str, Any]: """simple docstring""" lowercase__ = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(a ) > 0 and not text[0].isspace()): lowercase__ = ' ' + text return (text, kwargs) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None )-> Optional[Any]: """simple docstring""" return token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : "Conversation" )-> List[int]: """simple docstring""" lowercase__ = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(a ) lowercase__ = ' '.join(a ) lowercase__ = self.encode(a ) if len(a ) > self.model_max_length: lowercase__ = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
45
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) lowercase_ = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } lowercase_ = { """b0""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 224, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 240, """dropout_rate""": 0.2, """dw_padding""": [16], }, """b2""": { """hidden_dim""": 1_408, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 260, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 16], }, """b3""": { """hidden_dim""": 1_536, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 300, """dropout_rate""": 0.3, """dw_padding""": [5, 18], }, """b4""": { """hidden_dim""": 1_792, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 380, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_048, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 456, """dropout_rate""": 0.4, """dw_padding""": [13, 27], }, """b6""": { """hidden_dim""": 2_304, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 528, """dropout_rate""": 0.5, """dw_padding""": [31], }, """b7""": { """hidden_dim""": 2_560, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 600, """dropout_rate""": 0.5, """dw_padding""": [18], }, } def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str: lowercase__ = EfficientNetConfig() lowercase__ = CONFIG_MAP[model_name]['hidden_dim'] lowercase__ = CONFIG_MAP[model_name]['width_coef'] lowercase__ = CONFIG_MAP[model_name]['depth_coef'] lowercase__ = CONFIG_MAP[model_name]['image_size'] lowercase__ = CONFIG_MAP[model_name]['dropout_rate'] lowercase__ = CONFIG_MAP[model_name]['dw_padding'] lowercase__ = 'huggingface/label-files' lowercase__ = 'imagenet-1k-id2label.json' lowercase__ = 1000 lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase__ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} return config def __UpperCamelCase () -> Tuple: lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]: lowercase__ = CONFIG_MAP[model_name]['image_size'] lowercase__ = EfficientNetImageProcessor( size={'height': size, 'width': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=_SCREAMING_SNAKE_CASE , ) return preprocessor def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple: lowercase__ 
= [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )] lowercase__ = sorted(set(_SCREAMING_SNAKE_CASE ) ) lowercase__ = len(_SCREAMING_SNAKE_CASE ) lowercase__ = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE , range(_SCREAMING_SNAKE_CASE ) )} lowercase__ = [] rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') ) rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') ) rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') ) rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') ) rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') ) for b in block_names: lowercase__ = block_name_mapping[b] rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") ) rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") ) rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") ) rename_keys.append( (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") ) rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") ) rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") ) rename_keys.append( (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") ) rename_keys.append( (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") ) rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") ) rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") ) rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") ) rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") ) rename_keys.append( (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") ) rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") ) rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") ) rename_keys.append( (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") ) rename_keys.append( (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") ) rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') ) rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') ) rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') ) rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') ) rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') ) lowercase__ = {} for item in rename_keys: if item[0] in original_param_names: lowercase__ = 'efficientnet.' 
+ item[1] lowercase__ = 'classifier.weight' lowercase__ = 'classifier.bias' return key_mapping def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for key, value in tf_params.items(): if "normalization" in key: continue lowercase__ = key_mapping[key] if "_conv" in key and "kernel" in key: lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: lowercase__ = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) ) else: lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: lowercase__ = model_classes[model_name]( include_top=_SCREAMING_SNAKE_CASE , weights='imagenet' , input_tensor=_SCREAMING_SNAKE_CASE , input_shape=_SCREAMING_SNAKE_CASE , pooling=_SCREAMING_SNAKE_CASE , classes=1000 , classifier_activation='softmax' , ) lowercase__ = original_model.trainable_variables lowercase__ = original_model.non_trainable_variables lowercase__ = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: lowercase__ = param.numpy() lowercase__ = list(tf_params.keys() ) # Load HuggingFace model lowercase__ = get_efficientnet_config(_SCREAMING_SNAKE_CASE ) lowercase__ = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval() lowercase__ = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('Converting parameters...' ) lowercase__ = rename_keys(_SCREAMING_SNAKE_CASE ) replace_params(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Initialize preprocessor and preprocess input image lowercase__ = convert_image_processor(_SCREAMING_SNAKE_CASE ) lowercase__ = preprocessor(images=prepare_img() , return_tensors='pt' ) # HF model inference hf_model.eval() with torch.no_grad(): lowercase__ = hf_model(**_SCREAMING_SNAKE_CASE ) lowercase__ = outputs.logits.detach().numpy() # Original model inference lowercase__ = False lowercase__ = CONFIG_MAP[model_name]['image_size'] lowercase__ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) lowercase__ = image.img_to_array(_SCREAMING_SNAKE_CASE ) lowercase__ = np.expand_dims(_SCREAMING_SNAKE_CASE , axis=0 ) lowercase__ = original_model.predict(_SCREAMING_SNAKE_CASE ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same." print('Model outputs match!' 
) if save_model: # Create folder to save model if not os.path.isdir(_SCREAMING_SNAKE_CASE ): os.mkdir(_SCREAMING_SNAKE_CASE ) # Save converted model and image processor hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: # Push model and image processor to hub print(F"""Pushing converted {model_name} to the hub...""" ) lowercase__ = F"""efficientnet-{model_name}""" preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE ) hf_model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") lowercase_ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
45
1
import importlib.metadata
import operator
import re
import sys
from typing import Optional

from packaging import version


ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
45
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
45
1
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert value between the energy units above, via joules."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
45
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
45
1
import random


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
    lowercase__ = a[left_index]
    lowercase__ = left_index + 1
    for j in range(left_index + 1 , _SCREAMING_SNAKE_CASE ):
        if a[j] < pivot:
            lowercase__ , lowercase__ = a[i], a[j]
            i += 1
    lowercase__ , lowercase__ = a[i - 1], a[left_index]
    return i - 1


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
    if left < right:
        lowercase__ = random.randint(_SCREAMING_SNAKE_CASE , right - 1 )
        lowercase__ , lowercase__ = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        lowercase__ = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        quick_sort_random(
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            _SCREAMING_SNAKE_CASE , pivot_index + 1 , _SCREAMING_SNAKE_CASE )  # recursive quicksort to the right of the pivot point


def __UpperCamelCase () -> Dict:
    lowercase__ = input('Enter numbers separated by a comma:\n' ).strip()
    lowercase__ = [int(_SCREAMING_SNAKE_CASE ) for item in user_input.split(',' )]
    quick_sort_random(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) )
    print(_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    main()
45
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    lowercase_ = None

lowercase_ = logging.get_logger(__name__)

lowercase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}

lowercase_ = {
    """vocab_file""": {
        """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
        """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
        """moussaKam/barthez-orangesum-title""": (
            """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
        ),
    },
    """tokenizer_file""": {
        """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
        """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
        """moussaKam/barthez-orangesum-title""": (
            """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
        ),
    },
}

lowercase_ = {
    """moussaKam/mbarthez""": 1_024,
    """moussaKam/barthez""": 1_024,
    """moussaKam/barthez-orangesum-title""": 1_024,
}

lowercase_ = """▁"""


class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    _UpperCamelCase : Dict = VOCAB_FILES_NAMES
    _UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
    _UpperCamelCase : int = BarthezTokenizer

    def __init__( self : List[Any] , a : Union[str, Any]=None , a : Optional[Any]=None , a : Dict="<s>" , a : Union[str, Any]="</s>" , a : List[str]="</s>" , a : Optional[Any]="<s>" , a : int="<unk>" , a : str="<pad>" , a : Optional[int]="<mask>" , **a : Union[str, Any] , )-> Tuple:
        """simple docstring"""
        lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token

        super().__init__(
            a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , )

        lowercase__ = vocab_file
        lowercase__ = False if not self.vocab_file else True

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
        """simple docstring"""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        lowercase__ = [self.cls_token_id]
        lowercase__ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
        """simple docstring"""
        lowercase__ = [self.sep_token_id]
        lowercase__ = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : Optional[str] = None )-> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )

        if not os.path.isdir(a ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowercase__ = os.path.join(
            a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
            copyfile(self.vocab_file , a )

        return (out_vocab_file,)
45
1
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


lowercase_ = """3"""

print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)

try:
    import torch

    print("""Torch version:""", torch.__version__)
    print("""Cuda available:""", torch.cuda.is_available())
    print("""Cuda version:""", torch.version.cuda)
    print("""CuDNN version:""", torch.backends.cudnn.version())
    print("""Number of GPUs available:""", torch.cuda.device_count())
    print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
    print("""Torch version:""", None)

try:
    import deepspeed

    print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
    print("""DeepSpeed version:""", None)

try:
    import tensorflow as tf

    print("""TensorFlow version:""", tf.__version__)
    print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
    print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
    print("""TensorFlow version:""", None)
45
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    _UpperCamelCase : List[Any] = StableDiffusionSAGPipeline
    _UpperCamelCase : str = TEXT_TO_IMAGE_PARAMS
    _UpperCamelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : Union[str, Any] = False

    def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
        """simple docstring"""
        torch.manual_seed(0 )
        lowercase__ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        lowercase__ = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
        torch.manual_seed(0 )
        lowercase__ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        lowercase__ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        lowercase__ = CLIPTextModel(a )
        lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )

        lowercase__ = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[int] , a : Any=0 )-> Union[str, Any]:
        """simple docstring"""
        if str(a ).startswith('mps' ):
            lowercase__ = torch.manual_seed(a )
        else:
            lowercase__ = torch.Generator(device=a ).manual_seed(a )
        lowercase__ = {
            'prompt': '.',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 1.0,
            'sag_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs

    def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )


@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    def SCREAMING_SNAKE_CASE_ ( self : str )-> int:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
        """simple docstring"""
        lowercase__ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
        lowercase__ = sag_pipe.to(a )
        sag_pipe.set_progress_bar_config(disable=a )

        lowercase__ = '.'
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = sag_pipe(
            [prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )

        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
        """simple docstring"""
        lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        lowercase__ = sag_pipe.to(a )
        sag_pipe.set_progress_bar_config(disable=a )

        lowercase__ = '.'
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = sag_pipe(
            [prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )

        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]:
        """simple docstring"""
        lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        lowercase__ = sag_pipe.to(a )
        sag_pipe.set_progress_bar_config(disable=a )

        lowercase__ = '.'
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = sag_pipe(
            [prompt] , width=768 , height=512 , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )

        lowercase__ = output.images

        assert image.shape == (1, 512, 768, 3)
45
1
import importlib
import os
import sys


# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
    lowercase__ = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            F"""{test_file} instead.""" )
    lowercase__ = components[-1]
    if not test_fn.endswith('py' ):
        raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
    if not test_fn.startswith('test_modeling_' ):
        raise ValueError(
            F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )

    lowercase__ = components[:-1] + [test_fn.replace('.py' , '' )]
    lowercase__ = '.'.join(_SCREAMING_SNAKE_CASE )

    return test_module_path


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
    lowercase__ = get_module_path(_SCREAMING_SNAKE_CASE )
    lowercase__ = importlib.import_module(_SCREAMING_SNAKE_CASE )

    return test_module


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple:
    lowercase__ = []
    lowercase__ = get_test_module(_SCREAMING_SNAKE_CASE )
    for attr in dir(_SCREAMING_SNAKE_CASE ):
        if attr.endswith('ModelTester' ):
            tester_classes.append(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )

    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple:
    lowercase__ = []
    lowercase__ = get_test_module(_SCREAMING_SNAKE_CASE )
    for attr in dir(_SCREAMING_SNAKE_CASE ):
        lowercase__ = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'all_model_classes' , [] )
        if len(_SCREAMING_SNAKE_CASE ) > 0:
            test_classes.append(_SCREAMING_SNAKE_CASE )

    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
    lowercase__ = get_test_classes(_SCREAMING_SNAKE_CASE )
    lowercase__ = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )

    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
    lowercase__ = test_class()
    if hasattr(_SCREAMING_SNAKE_CASE , 'setUp' ):
        test.setUp()

    lowercase__ = None
    if hasattr(_SCREAMING_SNAKE_CASE , 'model_tester' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            lowercase__ = test.model_tester.__class__

    return model_tester


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
    lowercase__ = get_test_classes(_SCREAMING_SNAKE_CASE )
    lowercase__ = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(_SCREAMING_SNAKE_CASE )

    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
    lowercase__ = get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    lowercase__ = []
    for test_class in test_classes:
        lowercase__ = get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE )
        if tester_class is not None:
            tester_classes.append(_SCREAMING_SNAKE_CASE )

    # sort with class names
    return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
    lowercase__ = get_test_classes(_SCREAMING_SNAKE_CASE )
    lowercase__ = {test_class: get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE ) for test_class in test_classes}
    return test_tester_mapping


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict:
    lowercase__ = get_model_classes(_SCREAMING_SNAKE_CASE )
    lowercase__ = {
        model_class: get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes
    }
    return model_test_mapping


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
    lowercase__ = get_model_classes(_SCREAMING_SNAKE_CASE )
    lowercase__ = {
        model_class: get_tester_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes
    }
    return model_to_tester_mapping


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
    if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        return o
    elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        return o.__name__
    elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
        return [to_json(_SCREAMING_SNAKE_CASE ) for x in o]
    elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        return {to_json(_SCREAMING_SNAKE_CASE ): to_json(_SCREAMING_SNAKE_CASE ) for k, v in o.items()}
    else:
        return o
45
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    """facebook/deit-base-distilled-patch16-224""": (
        """https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    _UpperCamelCase : Any = 'deit'

    def __init__( self : Any , a : Union[str, Any]=768 , a : Optional[Any]=12 , a : Union[str, Any]=12 , a : Optional[int]=3_072 , a : Optional[int]="gelu" , a : Optional[Any]=0.0 , a : List[Any]=0.0 , a : int=0.02 , a : List[str]=1E-1_2 , a : Optional[int]=224 , a : Tuple=16 , a : List[Any]=3 , a : List[str]=True , a : Any=16 , **a : Union[str, Any] , )-> int:
        """simple docstring"""
        super().__init__(**a )

        lowercase__ = hidden_size
        lowercase__ = num_hidden_layers
        lowercase__ = num_attention_heads
        lowercase__ = intermediate_size
        lowercase__ = hidden_act
        lowercase__ = hidden_dropout_prob
        lowercase__ = attention_probs_dropout_prob
        lowercase__ = initializer_range
        lowercase__ = layer_norm_eps
        lowercase__ = image_size
        lowercase__ = patch_size
        lowercase__ = num_channels
        lowercase__ = qkv_bias
        lowercase__ = encoder_stride


class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    _UpperCamelCase : List[Any] = version.parse('1.11' )

    @property
    def SCREAMING_SNAKE_CASE_ ( self : int )-> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Any )-> float:
        """simple docstring"""
        return 1E-4
45
1
import os
from distutils.util import strtobool


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
    for e in env_keys:
        lowercase__ = int(os.environ.get(_SCREAMING_SNAKE_CASE , -1 ) )
        if val >= 0:
            return val
    return default


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
    lowercase__ = os.environ.get(_SCREAMING_SNAKE_CASE , str(_SCREAMING_SNAKE_CASE ) )
    return strtobool(_SCREAMING_SNAKE_CASE ) == 1  # As its name indicates `strtobool` actually returns an int...


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="no" ) -> Union[str, Any]:
    lowercase__ = os.environ.get(_SCREAMING_SNAKE_CASE , str(_SCREAMING_SNAKE_CASE ) )
    return value
45
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 ) -> List[Any]:
    lowercase__ = None
    if token is not None:
        lowercase__ = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""}

    # The id of a workflow (not of a workflow run)
    lowercase__ = '636036'

    lowercase__ = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""

    lowercase__ = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()

    return result["workflow_runs"]


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
    lowercase__ = get_daily_ci_runs(_SCREAMING_SNAKE_CASE )
    lowercase__ = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            lowercase__ = workflow_run['id']
            break
    return workflow_run_id


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
    lowercase__ = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE )
    if workflow_run_id is not None:
        lowercase__ = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                lowercase__ = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
    get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    lowercase__ = {}
    for artifact_name in artifact_names:
        lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" )
        if os.path.isfile(_SCREAMING_SNAKE_CASE ):
            lowercase__ = {}
            with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
                        # read the file
                        with z.open(_SCREAMING_SNAKE_CASE ) as f:
                            lowercase__ = f.read().decode('UTF-8' )

    return results
45
1
import torch

from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    _UpperCamelCase : Any = (KDPMaDiscreteScheduler,)
    _UpperCamelCase : int = 10

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , **a : Optional[Any] )-> Union[str, Any]:
        """simple docstring"""
        lowercase__ = {
            'num_train_timesteps': 1_100,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }

        config.update(**a )
        return config

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Tuple:
        """simple docstring"""
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=a )

    def SCREAMING_SNAKE_CASE_ ( self : str )-> Union[str, Any]:
        """simple docstring"""
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=a , beta_end=a )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Dict:
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=a )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[Any]:
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=a )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Tuple:
        """simple docstring"""
        lowercase__ = self.scheduler_classes[0]
        lowercase__ = self.get_scheduler_config(prediction_type='v_prediction' )
        lowercase__ = scheduler_class(**a )

        scheduler.set_timesteps(self.num_inference_steps )

        lowercase__ = self.dummy_model()
        lowercase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowercase__ = sample.to(a )

        for i, t in enumerate(scheduler.timesteps ):
            lowercase__ = scheduler.scale_model_input(a , a )

            lowercase__ = model(a , a )

            lowercase__ = scheduler.step(a , a , a )
            lowercase__ = output.prev_sample

        lowercase__ = torch.sum(torch.abs(a ) )
        lowercase__ = torch.mean(torch.abs(a ) )

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6_9_3_4E-0_7 ) < 1E-2
            assert abs(result_mean.item() - 6.1_1_1_2E-1_0 ) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2E-0_7 ) < 1E-2
            assert abs(result_mean.item() - 0.0002 ) < 1E-3

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]:
        """simple docstring"""
        if torch_device == "mps":
            return
        lowercase__ = self.scheduler_classes[0]
        lowercase__ = self.get_scheduler_config()
        lowercase__ = scheduler_class(**a )

        scheduler.set_timesteps(self.num_inference_steps )

        lowercase__ = self.dummy_model()
        lowercase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowercase__ = sample.to(a )

        for i, t in enumerate(scheduler.timesteps ):
            lowercase__ = scheduler.scale_model_input(a , a )

            lowercase__ = model(a , a )

            lowercase__ = scheduler.step(a , a , a )
            lowercase__ = output.prev_sample

        lowercase__ = torch.sum(torch.abs(a ) )
        lowercase__ = torch.mean(torch.abs(a ) )

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125 ) < 1E-2
            assert abs(result_mean.item() - 0.0266 ) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1E-2
            assert abs(result_mean.item() - 0.0266 ) < 1E-3

    def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[Any]:
        """simple docstring"""
        if torch_device == "mps":
            return
        lowercase__ = self.scheduler_classes[0]
        lowercase__ = self.get_scheduler_config()
        lowercase__ = scheduler_class(**a )
        scheduler.set_timesteps(self.num_inference_steps , device=a )

        lowercase__ = self.dummy_model()
        lowercase__ = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            lowercase__ = scheduler.scale_model_input(a , a )

            lowercase__ = model(a , a )

            lowercase__ = scheduler.step(a , a , a )
            lowercase__ = output.prev_sample

        lowercase__ = torch.sum(torch.abs(a ) )
        lowercase__ = torch.mean(torch.abs(a ) )

        if str(a ).startswith('cpu' ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125 ) < 1E-2
            assert abs(result_mean.item() - 0.0266 ) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1E-2
            assert abs(result_mean.item() - 0.0266 ) < 1E-3
45
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


lowercase_ = False


class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    pass


@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]:
        """simple docstring"""
        lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
        pipe.to(a )
        pipe.set_progress_bar_config(disable=a )

        lowercase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = pipe.dual_guided(
            prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(a )
            lowercase__ = VersatileDiffusionPipeline.from_pretrained(a , torch_dtype=torch.floataa )
        pipe.to(a )
        pipe.set_progress_bar_config(disable=a )

        lowercase__ = generator.manual_seed(0 )
        lowercase__ = pipe.dual_guided(
            prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images

        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]:
        """simple docstring"""
        lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
        pipe.to(a )
        pipe.set_progress_bar_config(disable=a )

        lowercase__ = 'cyberpunk 2077'
        lowercase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = pipe.dual_guided(
            prompt=a , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images

        lowercase__ = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

        lowercase__ = 'A painting of a squirrel eating a burger '
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = pipe.text_to_image(
            prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images

        lowercase__ = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1

        lowercase__ = pipe.image_variation(a , generator=a , output_type='numpy' ).images

        lowercase__ = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
45
1
import datasets

from .nmt_bleu import compute_bleu  # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py


lowercase_ = """\
@INPROCEEDINGS{Papineni02bleu:a,
    author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
    title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
    booktitle = {},
    year = {2002},
    pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
    title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
    author = \"Lin, Chin-Yew and Och, Franz Josef\",
    booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
    month = \"aug 23{--}aug 27\",
    year = \"2004\",
    address = \"Geneva, Switzerland\",
    publisher = \"COLING\",
    url = \"https://www.aclweb.org/anthology/C04-1072\",
    pages = \"501--507\",
}
"""

lowercase_ = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been
machine-translated from one natural language to another. Quality is considered to be the correspondence between a
machine's output and that of a human: \"the closer a machine translation is to a professional human translation, the
better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation
with human judgements of quality, and remains one of the most popular automated and inexpensive metrics.

Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good
quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the
translation's overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed].

BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the
reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score
of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason,
it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference
translations will increase the BLEU score.
"""

lowercase_ = """
Computes BLEU score of translated segments against one or more references.
Args:
    predictions: list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
    'bleu': bleu score,
    'precisions': geometric mean of n-gram precisions,
    'brevity_penalty': brevity penalty,
    'length_ratio': ratio of lengths,
    'translation_length': translation_length,
    'reference_length': reference_length
Examples:

    >>> predictions = [
    ...     [\"hello\", \"there\", \"general\", \"kenobi\"],  # tokenized prediction of the first sample
    ...     [\"foo\", \"bar\", \"foobar\"]  # tokenized prediction of the second sample
    ... ]
    >>> references = [
    ...     [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]],  # tokenized references for the first sample (2 references)
    ...     [[\"foo\", \"bar\", \"foobar\"]]  # tokenized references for the second sample (1 reference)
    ... ]
    >>> bleu = datasets.load_metric(\"bleu\")
    >>> results = bleu.compute(predictions=predictions, references=references)
    >>> print(results[\"bleu\"])
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE (datasets.Metric ):
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple:
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
                } ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
                'https://en.wikipedia.org/wiki/BLEU',
                'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
            ] , )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Any , a : Union[str, Any] , a : Any=4 , a : str=False )-> int:
        """simple docstring"""
        lowercase__ = compute_bleu(
            reference_corpus=a , translation_corpus=a , max_order=a , smooth=a )
        ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
45
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bool:
    if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(_SCREAMING_SNAKE_CASE ) == 0:
        raise ValueError('Input list must be a non empty list' )
    if len(_SCREAMING_SNAKE_CASE ) == 1:
        return True
    lowercase__ = series[1] - series[0]
    for index in range(len(_SCREAMING_SNAKE_CASE ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> float:
    if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(_SCREAMING_SNAKE_CASE ) == 0:
        raise ValueError('Input list must be a non empty list' )
    lowercase__ = 0
    for val in series:
        answer += val
    return answer / len(_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
45
1
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image


class SCREAMING_SNAKE_CASE :
    def __init__( self : Optional[int] , a : Optional[int] , a : List[str]=3 , a : List[str]=32 , a : Optional[Any]=3 , a : List[Any]=10 , a : Union[str, Any]=[8, 16, 32, 64] , a : List[str]=[1, 1, 2, 1] , a : List[str]=True , a : Optional[int]=True , a : Any="relu" , a : Union[str, Any]=3 , a : int=None , a : str=["stage2", "stage3", "stage4"] , a : List[Any]=[2, 3, 4] , a : Union[str, Any]=1 , )-> List[Any]:
        """simple docstring"""
        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = image_size
        lowercase__ = num_channels
        lowercase__ = embeddings_size
        lowercase__ = hidden_sizes
        lowercase__ = depths
        lowercase__ = is_training
        lowercase__ = use_labels
        lowercase__ = hidden_act
        lowercase__ = num_labels
        lowercase__ = scope
        lowercase__ = len(a )
        lowercase__ = out_features
        lowercase__ = out_indices
        lowercase__ = num_groups

    def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Optional[int]:
        """simple docstring"""
        lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        lowercase__ = None
        if self.use_labels:
            lowercase__ = ids_tensor([self.batch_size] , self.num_labels )

        lowercase__ = self.get_config()

        return config, pixel_values, labels

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Optional[int]:
        """simple docstring"""
        return BitConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )

    def SCREAMING_SNAKE_CASE_ ( self : str , a : Any , a : Dict , a : Optional[int] )-> List[Any]:
        """simple docstring"""
        lowercase__ = BitModel(config=a )
        model.to(a )
        model.eval()
        lowercase__ = model(a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def SCREAMING_SNAKE_CASE_ ( self : Dict , a : List[str] , a : Optional[int] , a : List[str] )-> List[str]:
        """simple docstring"""
        lowercase__ = self.num_labels
        lowercase__ = BitForImageClassification(a )
        model.to(a )
        model.eval()
        lowercase__ = model(a , labels=a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : List[Any] , a : List[Any] , a : List[str] )-> Union[str, Any]:
        """simple docstring"""
        lowercase__ = BitBackbone(config=a )
        model.to(a )
        model.eval()
        lowercase__ = model(a )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )

        # verify backbone works with out_features=None
        lowercase__ = None
        lowercase__ = BitBackbone(config=a )
        model.to(a )
        model.eval()
        lowercase__ = model(a )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]:
        """simple docstring"""
        lowercase__ = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ = config_and_inputs
        lowercase__ = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    _UpperCamelCase : Union[str, Any] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    _UpperCamelCase : Dict = (
        {'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
        if is_torch_available()
        else {}
    )

    _UpperCamelCase : Dict = False
    _UpperCamelCase : Union[str, Any] = False
    _UpperCamelCase : Optional[Any] = False
    _UpperCamelCase : Union[str, Any] = False
    _UpperCamelCase : Any = False

    def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[int]:
        """simple docstring"""
        lowercase__ = BitModelTester(self )
        lowercase__ = ConfigTester(self , config_class=a , has_text_modality=a )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]:
        """simple docstring"""
        return

    @unittest.skip(reason='Bit does not output attentions' )
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Optional[Any]:
        """simple docstring"""
        pass

    @unittest.skip(reason='Bit does not use inputs_embeds' )
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
        """simple docstring"""
        pass

    @unittest.skip(reason='Bit does not support input and output embeddings' )
    def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
        """simple docstring"""
        pass

    def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[Any]:
        """simple docstring"""
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowercase__ = model_class(a )
            lowercase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase__ = [*signature.parameters.keys()]

            lowercase__ = ['pixel_values']
            self.assertListEqual(arg_names[:1] , a )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]:
        """simple docstring"""
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a )

    def SCREAMING_SNAKE_CASE_ ( self : str )-> str:
        """simple docstring"""
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*a )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
        """simple docstring"""
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowercase__ = model_class(config=a )
            for name, module in model.named_modules():
                if isinstance(a , (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[Any]:
        """simple docstring"""

        def check_hidden_states_output(a : int , a : Optional[Any] , a : Dict ):
            lowercase__ = model_class(a )
            model.to(a )
            model.eval()

            with torch.no_grad():
                lowercase__ = model(**self._prepare_for_class(a , a ) )

            lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            lowercase__ = self.model_tester.num_stages
            self.assertEqual(len(a ) , expected_num_stages + 1 )

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()

        lowercase__ = ['preactivation', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                lowercase__ = layer_type
                lowercase__ = True
                check_hidden_states_output(a , a , a )

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                lowercase__ = True

                check_hidden_states_output(a , a , a )

    @unittest.skip(reason='Bit does not use feedforward chunking' )
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> int:
        """simple docstring"""
        pass

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
        """simple docstring"""
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Optional[Any]:
        """simple docstring"""
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = BitModel.from_pretrained(a )
            self.assertIsNotNone(a )


def __UpperCamelCase () -> Dict:
    lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self : Any )-> Dict:
        """simple docstring"""
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Dict:
        """simple docstring"""
        lowercase__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(a )

        lowercase__ = self.default_image_processor
        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=a , return_tensors='pt' ).to(a )

        # forward pass
        with torch.no_grad():
            lowercase__ = model(**a )

        # verify the logits
        lowercase__ = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , a )

        lowercase__ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(a )

        self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )


@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
    _UpperCamelCase : List[str] = (BitBackbone,) if is_torch_available() else ()
    _UpperCamelCase : Any = BitConfig

    _UpperCamelCase : Optional[int] = False

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Union[str, Any]:
        """simple docstring"""
        lowercase__ = BitModelTester(self )
45
from __future__ import annotations

import math
from collections.abc import Callable


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 100 , ) -> float:
    lowercase__ = x_start
    lowercase__ = fnc(_SCREAMING_SNAKE_CASE )
    lowercase__ = 0.0

    for _ in range(_SCREAMING_SNAKE_CASE ):
        # Approximates curve as a sequence of linear lines and sums their length
        lowercase__ = (x_end - x_start) / steps + xa
        lowercase__ = fnc(_SCREAMING_SNAKE_CASE )
        length += math.hypot(xa - xa , fxa - fxa )

        # Increment step
        lowercase__ = xa
        lowercase__ = fxa

    return length


if __name__ == "__main__":

    def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
        return math.sin(10 * x )

    print("""f(x) = sin(10 * x)""")
    print("""The length of the curve from x = -10 to x = 10 is:""")
    lowercase_ = 10
    while i <= 100_000:
        print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
        i *= 10
45
1
import argparse
import json
import subprocess


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
    lowercase__ = []
    lowercase__ = (
        F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    lowercase__ = subprocess.run(_SCREAMING_SNAKE_CASE , shell=_SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE )
    lowercase__ = output.stdout.decode('utf-8' )
    lowercase__ = json.loads(_SCREAMING_SNAKE_CASE )
    lowercase__ = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(_SCREAMING_SNAKE_CASE )

    # save the result so we can report them on Slack
    with open('offline_runners.txt' , 'w' ) as fp:
        fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) )

    if len(_SCREAMING_SNAKE_CASE ) > 0:
        lowercase__ = '\n'.join([x['name'] for x in offline_runners] )
        raise ValueError(F"""The following runners are offline:\n{failed}""" )


if __name__ == "__main__":

    def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
        return values.split(',' )

    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--target_runners""",
        default=None,
        type=list_str,
        required=True,
        help="""Comma-separated list of runners to check status.""",
    )
    parser.add_argument(
        """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
    )
    lowercase_ = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
45
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


lowercase_ = {
    """configuration_squeezebert""": [
        """SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """SqueezeBertConfig""",
        """SqueezeBertOnnxConfig""",
    ],
    """tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = ["""SqueezeBertTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SqueezeBertForMaskedLM""",
        """SqueezeBertForMultipleChoice""",
        """SqueezeBertForQuestionAnswering""",
        """SqueezeBertForSequenceClassification""",
        """SqueezeBertForTokenClassification""",
        """SqueezeBertModel""",
        """SqueezeBertModule""",
        """SqueezeBertPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
45
1
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
    lowercase__ = {}
    lowercase__ = tokenizer(example['content'] , truncation=_SCREAMING_SNAKE_CASE )['input_ids']
    lowercase__ = len(example['content'] ) / len(output['input_ids'] )
    return output


lowercase_ = HfArgumentParser(PretokenizationArguments)
lowercase_ = parser.parse_args()
if args.num_workers is None:
    lowercase_ = multiprocessing.cpu_count()
lowercase_ = AutoTokenizer.from_pretrained(args.tokenizer_dir)

lowercase_ = time.time()
lowercase_ = load_dataset(args.dataset_name, split="""train""")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')

lowercase_ = time.time()
lowercase_ = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        """repo_name""",
        """path""",
        """copies""",
        """size""",
        """content""",
        """license""",
        """hash""",
        """line_mean""",
        """line_max""",
        """alpha_frac""",
        """autogenerated""",
    ],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')

lowercase_ = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
45
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int:
    lowercase__ = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length] )


if __name__ == "__main__":
    print(f'''{solution() = }''')
45
1
def __UpperCamelCase () -> int:
    return 1


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(_SCREAMING_SNAKE_CASE )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(_SCREAMING_SNAKE_CASE )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(_SCREAMING_SNAKE_CASE )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(_SCREAMING_SNAKE_CASE )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(_SCREAMING_SNAKE_CASE )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(_SCREAMING_SNAKE_CASE )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 200 ) -> int:
    return two_pound(_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    print(solution(int(input().strip())))
45
import numpy as np
import torch
import tqdm

from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    def __init__( self : Optional[Any] , a : UNetaDModel , a : UNetaDModel , a : DDPMScheduler , a : Any , )-> Dict:
        """simple docstring"""
        super().__init__()
        lowercase__ = value_function
        lowercase__ = unet
        lowercase__ = scheduler
        lowercase__ = env
        lowercase__ = env.get_dataset()
        lowercase__ = {}
        for key in self.data.keys():
            try:
                lowercase__ = self.data[key].mean()
            except:  # noqa: E722
                pass
        lowercase__ = {}
        for key in self.data.keys():
            try:
                lowercase__ = self.data[key].std()
            except:  # noqa: E722
                pass
        lowercase__ = env.observation_space.shape[0]
        lowercase__ = env.action_space.shape[0]

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Any , a : int )-> Dict:
        """simple docstring"""
        return (x_in - self.means[key]) / self.stds[key]

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : List[str] )-> str:
        """simple docstring"""
        return x_in * self.stds[key] + self.means[key]

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Tuple )-> Tuple:
        """simple docstring"""
        if type(a ) is dict:
            return {k: self.to_torch(a ) for k, v in x_in.items()}
        elif torch.is_tensor(a ):
            return x_in.to(self.unet.device )
        return torch.tensor(a , device=self.unet.device )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Optional[int] , a : Dict , a : Optional[Any] )-> List[Any]:
        """simple docstring"""
        for key, val in cond.items():
            lowercase__ = val.clone()
        return x_in

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[Any] , a : Any , a : Optional[Any] , a : Optional[int] )-> List[Any]:
        """simple docstring"""
        lowercase__ = x.shape[0]
        lowercase__ = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            lowercase__ = torch.full((batch_size,) , a , device=self.unet.device , dtype=torch.long )
            for _ in range(a ):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    lowercase__ = self.value_function(x.permute(0 , 2 , 1 ) , a ).sample
                    lowercase__ = torch.autograd.grad([y.sum()] , [x] )[0]

                    lowercase__ = self.scheduler._get_variance(a )
                    lowercase__ = torch.exp(0.5 * posterior_variance )
                    lowercase__ = model_std * grad
                lowercase__ = 0
                lowercase__ = x.detach()
                lowercase__ = x + scale * grad
                lowercase__ = self.reset_xa(a , a , self.action_dim )
            lowercase__ = self.unet(x.permute(0 , 2 , 1 ) , a ).sample.permute(0 , 2 , 1 )

            # TODO: verify deprecation of this kwarg
            lowercase__ = self.scheduler.step(a , a , a , predict_epsilon=a )['prev_sample']

            # apply conditions to the trajectory (set the initial state)
            lowercase__ = self.reset_xa(a , a , self.action_dim )
            lowercase__ = self.to_torch(a )
        return x, y

    def __call__( self : Any , a : Tuple , a : int=64 , a : Tuple=32 , a : List[Any]=2 , a : List[str]=0.1 )-> List[Any]:
        """simple docstring"""
        lowercase__ = self.normalize(a , 'observations' )
        lowercase__ = obs[None].repeat(a , axis=0 )

        lowercase__ = {0: self.to_torch(a )}
        lowercase__ = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        lowercase__ = randn_tensor(a , device=self.unet.device )
        lowercase__ = self.reset_xa(a , a , self.action_dim )
        lowercase__ = self.to_torch(a )

        # run the diffusion process
        lowercase__ , lowercase__ = self.run_diffusion(a , a , a , a )

        # sort output trajectories by value
        lowercase__ = y.argsort(0 , descending=a ).squeeze()
        lowercase__ = x[sorted_idx]
        lowercase__ = sorted_values[:, :, : self.action_dim]
        lowercase__ = actions.detach().cpu().numpy()
        lowercase__ = self.de_normalize(a , key='actions' )

        # select the action with the highest value
        if y is not None:
            lowercase__ = 0
        else:
            # if we didn't run value guiding, select a random action
            lowercase__ = np.random.randint(0 , a )

        lowercase__ = denorm_actions[selected_index, 0]
        return denorm_actions
45
1
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute the first `precision` digits of pi with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    # set the Decimal context so intermediate results carry enough digits
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
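# --- sanity check (added; not part of the original file) ---
# Each Chudnovsky term contributes roughly 14 correct digits, which is why
# `num_iterations = ceil(precision / 14)` suffices. Checking against known digits:

if __name__ == "__main__":
    assert pi(10) == "3.14159265"
    assert pi(20).startswith("3.14159265358979")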
45
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        # fundamental transformation applied to every pixel value
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
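# --- note (added; not part of the original file) ---
# Algebraically, 128 + level + (c - 128) reduces to c + level, so the transform is a
# plain additive shift; the 128-centred form only mirrors the usual "pivot around
# mid-grey" presentation. Range handling is delegated to PIL, which evaluates the
# function over 0..255 to build a lookup table for 8-bit images.

if __name__ == "__main__":
    for c in range(256):
        assert 128 + 100 + (c - 128) == c + 100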
45
1
import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class SCREAMING_SNAKE_CASE : def __init__( self : Optional[Any] , a : Optional[Any] , a : str=13 , a : Any=7 , a : Union[str, Any]=6 , a : Optional[Any]=17 , a : Optional[Any]=23 , a : Optional[int]=11 , a : Any=True , )-> Dict: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = act_dim lowercase__ = state_dim lowercase__ = hidden_size lowercase__ = max_length lowercase__ = is_training def SCREAMING_SNAKE_CASE_ ( self : Any )-> Tuple: """simple docstring""" lowercase__ = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) lowercase__ = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) lowercase__ = floats_tensor((self.batch_size, self.seq_length, 1) ) lowercase__ = floats_tensor((self.batch_size, self.seq_length, 1) ) lowercase__ = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_000 ) lowercase__ = random_attention_mask((self.batch_size, self.seq_length) ) lowercase__ = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[Any]: """simple docstring""" return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : Any , a : str , a : Any , a : Tuple , a : List[str] , a : str , a : List[str] , )-> Optional[Any]: """simple docstring""" lowercase__ = DecisionTransformerModel(config=a ) model.to(a ) model.eval() lowercase__ = model(a , a , a , a , a , a ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def SCREAMING_SNAKE_CASE_ ( self : str )-> List[str]: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) = config_and_inputs lowercase__ = { 'states': states, 'actions': actions, 'rewards': rewards, 'returns_to_go': returns_to_go, 'timesteps': timesteps, 'attention_mask': attention_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : str = (DecisionTransformerModel,) if is_torch_available() else () _UpperCamelCase : Optional[int] = () _UpperCamelCase : int = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {} # Ignoring 
of a failing test from GenerationTesterMixin, as the model does not use inputs_ids _UpperCamelCase : List[Any] = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features _UpperCamelCase : List[str] = False _UpperCamelCase : int = False _UpperCamelCase : Optional[Any] = False _UpperCamelCase : Optional[int] = False _UpperCamelCase : List[str] = False _UpperCamelCase : List[Any] = False _UpperCamelCase : Any = False _UpperCamelCase : Optional[Any] = False _UpperCamelCase : Optional[Any] = False def SCREAMING_SNAKE_CASE_ ( self : Dict )-> str: """simple docstring""" lowercase__ = DecisionTransformerModelTester(self ) lowercase__ = ConfigTester(self , config_class=a , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Any: """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Dict: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) @slow def SCREAMING_SNAKE_CASE_ ( self : str )-> List[str]: """simple docstring""" for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = DecisionTransformerModel.from_pretrained(a ) self.assertIsNotNone(a ) def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(a ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = [ 'states', 'actions', 'rewards', 'returns_to_go', 'timesteps', 'attention_mask', ] self.assertListEqual(arg_names[: len(a )] , a ) @require_torch class SCREAMING_SNAKE_CASE (unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]: """simple docstring""" lowercase__ = 2 # number of steps of autoregressive prediction we will perform lowercase__ = 10 # defined by the RL environment, may be normalized lowercase__ = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' ) lowercase__ = model.to(a ) lowercase__ = model.config torch.manual_seed(0 ) lowercase__ = torch.randn(1 , 1 , config.state_dim ).to(device=a , dtype=torch.floataa ) # env.reset() lowercase__ = torch.tensor( [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]] , device=a ) lowercase__ = torch.tensor(a , device=a , dtype=torch.floataa ).reshape(1 , 1 , 1 ) lowercase__ = state lowercase__ = torch.zeros(1 , 0 , config.act_dim , device=a , dtype=torch.floataa ) lowercase__ = torch.zeros(1 , 0 , device=a , dtype=torch.floataa ) lowercase__ = torch.tensor(0 , device=a , dtype=torch.long ).reshape(1 , 1 ) for step in range(a ): lowercase__ = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=a )] , dim=1 ) lowercase__ = torch.cat([rewards, torch.zeros(1 , 1 , device=a )] , dim=1 ) lowercase__ = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): lowercase__ , lowercase__ , lowercase__ = model( states=a , actions=a , rewards=a , returns_to_go=a , timesteps=a , attention_mask=a , return_dict=a , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) ) lowercase__ , lowercase__ , lowercase__ , lowercase__ = ( # env.step(action) 
torch.randn(1 , 1 , config.state_dim ).to(device=a , dtype=torch.floataa ), 1.0, False, {}, ) lowercase__ = action_pred[0, -1] lowercase__ = torch.cat([states, state] , dim=1 ) lowercase__ = returns_to_go[0, -1] - reward lowercase__ = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) lowercase__ = torch.cat( [timesteps, torch.ones((1, 1) , device=a , dtype=torch.long ) * (step + 1)] , dim=1 )
45
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class SCREAMING_SNAKE_CASE (unittest.TestCase ): def __init__( self : Any , a : str , a : List[Any]=7 , a : int=3 , a : int=18 , a : Optional[Any]=30 , a : Optional[int]=400 , a : int=True , a : Tuple=None , a : Optional[Any]=True , a : str=False , a : str=True , a : int=True , a : Tuple=[0.5, 0.5, 0.5] , a : Any=[0.5, 0.5, 0.5] , )-> Optional[int]: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = do_resize lowercase__ = size if size is not None else {'height': 18, 'width': 20} lowercase__ = do_thumbnail lowercase__ = do_align_axis lowercase__ = do_pad lowercase__ = do_normalize lowercase__ = image_mean lowercase__ = image_std def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : Optional[Any] = DonutImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]: """simple docstring""" lowercase__ = DonutImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self : Any )-> int: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a , 'do_resize' ) ) self.assertTrue(hasattr(a , 'size' ) ) self.assertTrue(hasattr(a , 'do_thumbnail' ) ) self.assertTrue(hasattr(a , 'do_align_long_axis' ) ) self.assertTrue(hasattr(a , 'do_pad' ) ) self.assertTrue(hasattr(a , 'do_normalize' ) ) self.assertTrue(hasattr(a , 'image_mean' ) ) self.assertTrue(hasattr(a , 'image_std' ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict: """simple docstring""" lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict: """simple docstring""" pass @is_flaky() def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a ) for image in image_inputs: self.assertIsInstance(a , 
Image.Image ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a ) for image in image_inputs: self.assertIsInstance(a , np.ndarray ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a ) for image in image_inputs: self.assertIsInstance(a , torch.Tensor ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
45
1
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: lowercase_ = None lowercase_ = logging.get_logger(__name__) lowercase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowercase_ = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json""" ), }, } lowercase_ = { """moussaKam/mbarthez""": 1_024, """moussaKam/barthez""": 1_024, """moussaKam/barthez-orangesum-title""": 1_024, } lowercase_ = """▁""" class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Dict = VOCAB_FILES_NAMES _UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Optional[Any] = ['input_ids', 'attention_mask'] _UpperCamelCase : int = BarthezTokenizer def __init__( self : List[Any] , a : Union[str, Any]=None , a : Optional[Any]=None , a : Dict="<s>" , a : Union[str, Any]="</s>" , a : List[str]="</s>" , a : Optional[Any]="<s>" , a : int="<unk>" , a : str="<pad>" , a : Optional[int]="<mask>" , **a : Union[str, Any] , )-> Tuple: """simple docstring""" lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token super().__init__( a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , ) lowercase__ = vocab_file lowercase__ = False if not self.vocab_file else True def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase__ = [self.cls_token_id] lowercase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : Optional[str] = None )-> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' 
) if not os.path.isdir(a ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ = os.path.join( a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a ): copyfile(self.vocab_file , a ) return (out_vocab_file,)
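# --- usage sketch (added; not part of the original file) ---
# The class above corresponds to `transformers.BarthezTokenizerFast`; the checkpoint
# name comes from the pretrained map defined earlier in this file.

if __name__ == "__main__":
    from transformers import BarthezTokenizerFast

    tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
    print(tokenizer("Bonjour, monde !")["input_ids"])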
45
import math def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(_SCREAMING_SNAKE_CASE ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError('This should never happen' ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. lowercase_ = """Enter the base and the power separated by a comma: """ lowercase_ , lowercase_ = map(int, input(prompt).split(""",""")) lowercase_ , lowercase_ = map(int, input(prompt).split(""",""")) # We find the log of each number, using the function res(), which takes two # arguments. lowercase_ = res(xa, ya) lowercase_ = res(xa, ya) # We check for the largest number if resa > resa: print("""Largest number is""", xa, """^""", ya) elif resa > resa: print("""Largest number is""", xa, """^""", ya) else: print("""Both are equal""")
45
1
class EditDistance:
    def __init__(self) -> None:
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
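# --- alternative sketch (added; not part of the original file) ---
# The same recurrence written as a standalone memoised function, using
# functools.lru_cache instead of an explicit dp table:

from functools import lru_cache


def edit_distance(word1: str, word2: str) -> int:
    @lru_cache(maxsize=None)
    def dist(m: int, n: int) -> int:
        if m == 0:
            return n  # insert the remaining characters of word2
        if n == 0:
            return m  # delete the remaining characters of word1
        if word1[m - 1] == word2[n - 1]:
            return dist(m - 1, n - 1)
        return 1 + min(
            dist(m, n - 1),      # insert
            dist(m - 1, n),      # delete
            dist(m - 1, n - 1),  # replace
        )

    return dist(len(word1), len(word2))


if __name__ == "__main__":
    assert edit_distance("kitten", "sitting") == 3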
45
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
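# --- usage sketch (added; not part of the original file) ---
# The classic 5x5 example: with 8-directional connectivity there are five islands.

if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    matrix = Matrix(len(grid), len(grid[0]), grid)
    print(matrix.count_islands())  # -> 5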
45
1
import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig lowercase_ = { """facebook/maskformer-swin-base-ade""": ( """https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json""" ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } lowercase_ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : List[str] = 'maskformer' _UpperCamelCase : List[str] = {'hidden_size': 'mask_feature_size'} _UpperCamelCase : Optional[int] = ['resnet', 'swin'] _UpperCamelCase : List[Any] = ['detr'] def __init__( self : List[str] , a : int = 256 , a : int = 256 , a : float = 0.1 , a : bool = False , a : Optional[Dict] = None , a : Optional[Dict] = None , a : float = 0.02 , a : float = 1.0 , a : float = 1.0 , a : float = 1.0 , a : float = 20.0 , a : Optional[bool] = None , **a : Tuple , )-> str: """simple docstring""" if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k lowercase__ = SwinConfig( image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(a , a ): lowercase__ = backbone_config.pop('model_type' ) lowercase__ = CONFIG_MAPPING[backbone_model_type] lowercase__ = config_class.from_dict(a ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """ f"""Supported model types: {",".join(self.backbones_supported )}""" ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 lowercase__ = DetrConfig() else: # verify that the decoder is supported lowercase__ = ( decoder_config.pop('model_type' ) if isinstance(a , a ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( f"""Transformer Decoder {decoder_type} not supported, please use one of""" f""" {",".join(self.decoders_supported )}""" ) if isinstance(a , a ): lowercase__ = CONFIG_MAPPING[decoder_type] lowercase__ = config_class.from_dict(a ) lowercase__ = backbone_config lowercase__ = decoder_config # main feature dimension for the model lowercase__ = fpn_feature_size lowercase__ = mask_feature_size # initializer lowercase__ = init_std lowercase__ = init_xavier_std # Hungarian matcher && loss lowercase__ = cross_entropy_weight lowercase__ = dice_weight lowercase__ = mask_weight lowercase__ = use_auxiliary_loss lowercase__ = no_object_weight lowercase__ = output_auxiliary_logits lowercase__ = self.decoder_config.encoder_attention_heads lowercase__ = self.decoder_config.num_hidden_layers super().__init__(**a ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , a : PretrainedConfig , a : PretrainedConfig , **a : Union[str, Any] )-> List[str]: """simple docstring""" return cls( backbone_config=a , decoder_config=a , **a , ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Dict[str, any]: """simple docstring""" lowercase__ = copy.deepcopy(self.__dict__ ) lowercase__ = self.backbone_config.to_dict() lowercase__ = self.decoder_config.to_dict() lowercase__ = self.__class__.model_type return output
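# --- usage sketch (added; not part of the original file) ---
# The class above corresponds to `transformers.MaskFormerConfig`. A minimal example
# of constructing it with the documented fall-backs (Swin-B backbone, DETR decoder),
# using the public API rather than the local class name:

if __name__ == "__main__":
    from transformers import MaskFormerConfig

    config = MaskFormerConfig()  # falls back to Swin backbone + DETR decoder
    print(config.backbone_config.model_type)  # "swin"
    print(config.decoder_config.model_type)  # "detr"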
45
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to its representation in any base from 2 to 36."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # digits above 9 map to letters via ALPHABET_VALUES
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for base in range(2, 37):
        for num in range(1_000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
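# --- note (added; not part of the original file) ---
# The standard library already provides the inverse: int(text, base) parses any base
# up to 36, which is exactly what the self-test above relies on. A minimal round trip:

if __name__ == "__main__":
    assert decimal_to_any(255, 16) == "FF"
    assert int("FF", 16) == 255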
45
1
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(
    lat1: float, lon1: float, lat2: float, lon2: float
) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
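# --- usage sketch (added; not part of the original file) ---
# Illustrative call only (run from within the package, since the haversine import is
# relative). London/Paris coordinates are approximate; the result is in metres and
# should come out on the order of 3.4e5 (about 340 km).

if __name__ == "__main__":
    london = (51.5074, -0.1278)
    paris = (48.8566, 2.3522)
    print(lamberts_ellipsoidal_distance(*london, *paris))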
45
import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE : def __init__( self : List[Any] , a : Any , a : Optional[int]=13 , a : Tuple=30 , a : Union[str, Any]=2 , a : List[str]=3 , a : Dict=True , a : List[str]=True , a : List[Any]=32 , a : List[str]=5 , a : Optional[int]=4 , a : List[str]=37 , a : Dict="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : int=10 , a : List[str]=0.02 , a : int=None , a : List[str]=2 , )-> Dict: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = scope lowercase__ = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase__ = (image_size // patch_size) ** 2 lowercase__ = num_patches + 1 def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any: """simple docstring""" lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]: """simple docstring""" return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[Any] , a : List[str] , a : Dict )-> Optional[Any]: """simple docstring""" lowercase__ = ViTModel(config=a ) model.to(a ) model.eval() lowercase__ = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[Any] , a : int , a : Tuple )-> Union[str, Any]: """simple docstring""" lowercase__ = ViTForMaskedImageModeling(config=a ) model.to(a ) model.eval() lowercase__ = model(a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale 
images lowercase__ = 1 lowercase__ = ViTForMaskedImageModeling(a ) model.to(a ) model.eval() lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ = model(a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[str] , a : int , a : List[Any] )-> str: """simple docstring""" lowercase__ = self.type_sequence_label_size lowercase__ = ViTForImageClassification(a ) model.to(a ) model.eval() lowercase__ = model(a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase__ = 1 lowercase__ = ViTForImageClassification(a ) model.to(a ) model.eval() lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) = config_and_inputs lowercase__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : Any = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) _UpperCamelCase : Union[str, Any] = ( {'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification} if is_torch_available() else {} ) _UpperCamelCase : int = True _UpperCamelCase : int = False _UpperCamelCase : Union[str, Any] = False _UpperCamelCase : Dict = False def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]: """simple docstring""" lowercase__ = ViTModelTester(self ) lowercase__ = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(a ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_image_modeling(*a ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a ) @slow def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]: """simple docstring""" for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = ViTModel.from_pretrained(a ) self.assertIsNotNone(a ) def __UpperCamelCase () -> str: lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE (unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]: """simple docstring""" return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]: """simple docstring""" lowercase__ = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(a ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ = model(**a ) # verify the logits lowercase__ = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , a ) lowercase__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) ) @slow def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]: """simple docstring""" lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' ).to(a ) lowercase__ = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 ) lowercase__ = prepare_img() lowercase__ = image_processor(images=a , return_tensors='pt' ) lowercase__ = inputs.pixel_values.to(a ) # forward pass with torch.no_grad(): lowercase__ = model(a , interpolate_pos_encoding=a ) # verify the logits lowercase__ = torch.Size((1, 3_601, 384) ) self.assertEqual(outputs.last_hidden_state.shape , a ) lowercase__ = torch.tensor( [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(a ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self : str )-> str: """simple docstring""" lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=a , return_tensors='pt' ) lowercase__ = inputs.pixel_values.to(a ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowercase__ = model(a )
45
1
import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType lowercase_ , lowercase_ , lowercase_ = False, False, False @dataclass class SCREAMING_SNAKE_CASE : _UpperCamelCase : Optional[int] = None _UpperCamelCase : bool = True _UpperCamelCase : bool = True _UpperCamelCase : Optional[str] = None # Automatically constructed _UpperCamelCase : ClassVar[str] = "dict" _UpperCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} ) _UpperCamelCase : str = field(default='Audio' , init=UpperCAmelCase , repr=UpperCAmelCase ) def __call__( self : Tuple )-> List[Any]: """simple docstring""" return self.pa_type def SCREAMING_SNAKE_CASE_ ( self : Dict , a : Union[str, bytes, dict] )-> dict: """simple docstring""" try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err if isinstance(a , a ): return {"bytes": None, "path": value} elif isinstance(a , a ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes lowercase__ = BytesIO() sf.write(a , value['array'] , value['sampling_rate'] , format='wav' ) return {"bytes": buffer.getvalue(), "path": None} elif value.get('path' ) is not None and os.path.isfile(value['path'] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith('pcm' ): # "PCM" only has raw audio bytes if value.get('sampling_rate' ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' ) if value.get('bytes' ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) lowercase__ = np.frombuffer(value['bytes'] , dtype=np.intaa ).astype(np.floataa ) / 32_767 else: lowercase__ = np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.floataa ) / 32_767 lowercase__ = BytesIO(bytes() ) sf.write(a , a , value['sampling_rate'] , format='wav' ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get('path' )} elif value.get('bytes' ) is not None or value.get('path' ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get('bytes' ), "path": value.get('path' )} else: raise ValueError( f"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def SCREAMING_SNAKE_CASE_ ( self : Dict , a : dict , a : Optional[Dict[str, Union[str, bool, None]]] = None )-> dict: """simple docstring""" if not self.decode: raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' 
) lowercase__ , lowercase__ = (value['path'], BytesIO(value['bytes'] )) if value['bytes'] is not None else (value['path'], None) if path is None and file is None: raise ValueError(f"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" ) try: import librosa import soundfile as sf except ImportError as err: raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err lowercase__ = xsplitext(a )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( 'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, ' 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( 'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, ' 'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' ) if file is None: lowercase__ = token_per_repo_id or {} lowercase__ = path.split('::' )[-1] try: lowercase__ = string_to_dict(a , config.HUB_DATASETS_URL )['repo_id'] lowercase__ = token_per_repo_id[repo_id] except (ValueError, KeyError): lowercase__ = None with xopen(a , 'rb' , use_auth_token=a ) as f: lowercase__ , lowercase__ = sf.read(a ) else: lowercase__ , lowercase__ = sf.read(a ) lowercase__ = array.T if self.mono: lowercase__ = librosa.to_mono(a ) if self.sampling_rate and self.sampling_rate != sampling_rate: lowercase__ = librosa.resample(a , orig_sr=a , target_sr=self.sampling_rate ) lowercase__ = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def SCREAMING_SNAKE_CASE_ ( self : int )-> Union["FeatureType", Dict[str, "FeatureType"]]: """simple docstring""" from .features import Value if self.decode: raise ValueError('Cannot flatten a decoded Audio feature.' 
) return { "bytes": Value('binary' ), "path": Value('string' ), } def SCREAMING_SNAKE_CASE_ ( self : Dict , a : Union[pa.StringArray, pa.StructArray] )-> pa.StructArray: """simple docstring""" if pa.types.is_string(storage.type ): lowercase__ = pa.array([None] * len(a ) , type=pa.binary() ) lowercase__ = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): lowercase__ = pa.array([None] * len(a ) , type=pa.string() ) lowercase__ = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ): lowercase__ = pa.array([Audio().encode_example(a ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('bytes' ) >= 0: lowercase__ = storage.field('bytes' ) else: lowercase__ = pa.array([None] * len(a ) , type=pa.binary() ) if storage.type.get_field_index('path' ) >= 0: lowercase__ = storage.field('path' ) else: lowercase__ = pa.array([None] * len(a ) , type=pa.string() ) lowercase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() ) return array_cast(a , self.pa_type ) def SCREAMING_SNAKE_CASE_ ( self : Any , a : pa.StructArray )-> pa.StructArray: """simple docstring""" @no_op_if_value_is_null def path_to_bytes(a : Any ): with xopen(a , 'rb' ) as f: lowercase__ = f.read() return bytes_ lowercase__ = pa.array( [ (path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) lowercase__ = pa.array( [os.path.basename(a ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , ) lowercase__ = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() ) return array_cast(a , self.pa_type )
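# --- usage sketch (added; not part of the original file) ---
# Typical use of this feature through the public `datasets` API: cast an audio column
# so that examples decode to {"path", "array", "sampling_rate"} on access, resampled
# to 16 kHz. The dataset name below is illustrative.

if __name__ == "__main__":
    from datasets import Audio, load_dataset

    ds = load_dataset("PolyAI/minds14", "en-US", split="train")  # illustrative dataset
    ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
    print(ds[0]["audio"]["sampling_rate"])  # 16000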
45
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return

    # If first element is smaller than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (int)((h - i + 1) / 3)

        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))

        # Recursively sort last 2/3 elements
        stooge(arr, i + t, (h))

        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
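# --- note (added; not part of the original file) ---
# Stooge sort runs in O(n^(log 3 / log 1.5)) ~ O(n^2.71) time, so it is strictly a
# teaching curiosity. A quick randomized check against the builtin sort:

if __name__ == "__main__":
    import random

    data = [random.randint(-100, 100) for _ in range(50)]
    assert stooge_sort(list(data)) == sorted(data)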
45
1
import argparse
import json
import os

from tensorflow.core.protobuf.saved_model_pb2 import SavedModel


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
45
from scipy.stats import spearmanr import datasets lowercase_ = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ lowercase_ = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ lowercase_ = R"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE (datasets.Metric ): def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def SCREAMING_SNAKE_CASE_ ( self : int , a : str , a : Any , a : str=False )-> Optional[int]: """simple docstring""" lowercase__ = spearmanr(a , a ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
45
1
import string import numpy def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: return b if a == 0 else greatest_common_divisor(b % a , _SCREAMING_SNAKE_CASE ) class SCREAMING_SNAKE_CASE : _UpperCamelCase : Any = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) _UpperCamelCase : Dict = numpy.vectorize(lambda UpperCAmelCase : x % 36 ) _UpperCamelCase : List[Any] = numpy.vectorize(UpperCAmelCase ) def __init__( self : Dict , a : numpy.ndarray )-> None: """simple docstring""" lowercase__ = self.modulus(a ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key lowercase__ = encrypt_key.shape[0] def SCREAMING_SNAKE_CASE_ ( self : str , a : str )-> int: """simple docstring""" return self.key_string.index(a ) def SCREAMING_SNAKE_CASE_ ( self : Any , a : int )-> str: """simple docstring""" return self.key_string[round(a )] def SCREAMING_SNAKE_CASE_ ( self : Dict )-> None: """simple docstring""" lowercase__ = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: lowercase__ = det % len(self.key_string ) lowercase__ = len(self.key_string ) if greatest_common_divisor(a , len(self.key_string ) ) != 1: lowercase__ = ( f"""determinant modular {req_l} of encryption key({det}) """ f"""is not co prime w.r.t {req_l}.\nTry another key.""" ) raise ValueError(a ) def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str )-> str: """simple docstring""" lowercase__ = [char for char in text.upper() if char in self.key_string] lowercase__ = chars[-1] while len(a ) % self.break_key != 0: chars.append(a ) return "".join(a ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : str )-> str: """simple docstring""" lowercase__ = self.process_text(text.upper() ) lowercase__ = '' for i in range(0 , len(a ) - self.break_key + 1 , self.break_key ): lowercase__ = text[i : i + self.break_key] lowercase__ = [self.replace_letters(a ) for char in batch] lowercase__ = numpy.array([vec] ).T lowercase__ = self.modulus(self.encrypt_key.dot(a ) ).T.tolist()[ 0 ] lowercase__ = ''.join( self.replace_digits(a ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def SCREAMING_SNAKE_CASE_ ( self : int )-> numpy.ndarray: """simple docstring""" lowercase__ = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: lowercase__ = det % len(self.key_string ) lowercase__ = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: lowercase__ = i break lowercase__ = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(a ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str )-> str: """simple docstring""" lowercase__ = self.make_decrypt_key() lowercase__ = self.process_text(text.upper() ) lowercase__ = '' for i in range(0 , len(a ) - self.break_key + 1 , self.break_key ): lowercase__ = text[i : i + self.break_key] lowercase__ = [self.replace_letters(a ) for char in batch] lowercase__ = numpy.array([vec] ).T lowercase__ = self.modulus(decrypt_key.dot(a ) ).T.tolist()[0] lowercase__ = ''.join( self.replace_digits(a ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def __UpperCamelCase () -> None: lowercase__ = int(input('Enter the order of the encryption key: ' ) ) lowercase__ = [] print('Enter each row of the encryption key with space separated integers' ) for _ in range(_SCREAMING_SNAKE_CASE ): lowercase__ = 
[int(_SCREAMING_SNAKE_CASE ) for x in input().split()] hill_matrix.append(_SCREAMING_SNAKE_CASE ) lowercase__ = HillCipher(numpy.array(_SCREAMING_SNAKE_CASE ) ) print('Would you like to encrypt or decrypt some text? (1 or 2)' ) lowercase__ = input('\n1. Encrypt\n2. Decrypt\n' ) if option == "1": lowercase__ = input('What text would you like to encrypt?: ' ) print('Your encrypted text is:' ) print(hc.encrypt(_SCREAMING_SNAKE_CASE ) ) elif option == "2": lowercase__ = input('What text would you like to decrypt?: ' ) print('Your decrypted text is:' ) print(hc.decrypt(_SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
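# --- Added usage sketch, not part of the original file ---
# A minimal round trip for the cipher class above, assuming it is bound to the
# name HillCipher that main() already calls, and assuming its vectorized
# helpers are repaired to numpy.vectorize(lambda x: x % 36) and
# numpy.vectorize(round): as written, the first lambda names its argument
# UpperCAmelCase while its body refers to x. The key matrix must have a
# determinant coprime with 36; det([[2, 5], [1, 6]]) = 7 qualifies.
import numpy

hill_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
ciphertext = hill_cipher.encrypt('testing hill cipher')
assert hill_cipher.decrypt(ciphertext).startswith('TESTINGHILLCIPHER')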
45
def solution(length = 50 ) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f'''{solution() = }''')
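# --- Added sanity check, not part of the original file ---
# The triple loop above counts the tilings of Project Euler problem 114: for
# each row length it places every red block of length >= 3 at every admissible
# start, adds the ways to fill what remains after the block plus a separating
# black square, and adds one for the block that closes the row. The problem
# statement gives exactly 17 arrangements for a row of length 7:
assert solution(3) == 2
assert solution(7) == 17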
45
1
import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Any: lowercase__ = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', 'decoder.output_projection.weight', '_float_tensor', 'encoder.embed_positions._float_tensor', 'decoder.embed_positions._float_tensor', ] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]: lowercase__ , lowercase__ = emb.weight.shape lowercase__ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) lowercase__ = emb.weight.data return lin_layer def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int: lowercase__ = {} for old_key in state_dict.keys(): lowercase__ = old_key if "moe_layer.experts." in key: if expert_idx is not None: lowercase__ = key.replace('moe_layer.experts.0' , F"""ffn.experts.expert_{expert_idx}""" ) else: lowercase__ = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' ) if "gate" in key: lowercase__ = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' ) if "fc2" and "experts" not in key: lowercase__ = key.replace('.fc2.' , '.ffn.fc2.' ) if "fc1" and "experts" not in key: lowercase__ = key.replace('.fc1.' , '.ffn.fc1.' ) if ".encoder_attn." in key: lowercase__ = key.replace('.encoder_attn.' , '.cross_attention.' ) if "encoder_attn_layer_norm" in key: lowercase__ = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' ) if "final_layer_norm" in key: lowercase__ = key.replace('final_layer_norm' , 'ff_layer_norm' ) lowercase__ = state_dict[old_key] return new_dict def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = WEIGHTS_NAME ) -> List[Any]: lowercase__ = [] lowercase__ = 0 os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) for expert in range(_SCREAMING_SNAKE_CASE ): lowercase__ = switch_checkpoint_path + F"""-rank-{expert}.pt""" if os.path.isfile(_SCREAMING_SNAKE_CASE ): lowercase__ = torch.load(_SCREAMING_SNAKE_CASE )['model'] remove_ignore_keys_(_SCREAMING_SNAKE_CASE ) lowercase__ = rename_fairseq_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowercase__ = os.path.join( _SCREAMING_SNAKE_CASE , weights_name.replace('.bin' , F"""-{len(_SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin""" ) ) torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_SCREAMING_SNAKE_CASE )[0]].dtype ) # Add the last block lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , weights_name.replace('.bin' , F"""-{len(_SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin""" ) ) lowercase__ = torch.load(switch_checkpoint_path + '-shared.pt' )['model'] remove_ignore_keys_(_SCREAMING_SNAKE_CASE ) lowercase__ = rename_fairseq_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowercase__ = shared_weights['decoder.embed_tokens.weight'] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(_SCREAMING_SNAKE_CASE ) == 1: lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Otherwise, let's build the index lowercase__ = {} for idx, shard in enumerate(_SCREAMING_SNAKE_CASE ): lowercase__ = weights_name.replace('.bin' , F"""-{idx+1:05d}-of-{len(_SCREAMING_SNAKE_CASE ):05d}.bin""" ) lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , weights_name.replace('.bin' , F"""-{idx+1:05d}-of-???.bin""" ) ) os.rename(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) for key in shard: lowercase__ = shard_file # Add the metadata lowercase__ = {'total_size': total_size} lowercase__ = {'metadata': metadata, 'weight_map': weight_map} with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 'w' , encoding='utf-8' ) as f: lowercase__ = json.dumps(_SCREAMING_SNAKE_CASE , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE ) + '\n' f.write(_SCREAMING_SNAKE_CASE ) return metadata, index if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) lowercase_ = parser.parse_args() lowercase_ , lowercase_ = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) lowercase_ = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) lowercase_ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
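# --- Added illustration, not part of the original script ---
# shard_on_the_fly above emits the standard Hugging Face sharded-checkpoint
# index: "metadata.total_size" holds the summed tensor bytes and "weight_map"
# sends each parameter name to the shard file that stores it. A hand-built
# example with hypothetical parameter and file names:
import json

index = {
    "metadata": {"total_size": 4_000_000},
    "weight_map": {
        "ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin",
        "decoder.embed_tokens.weight": "pytorch_model-00002-of-00002.bin",
    },
}
print(json.dumps(index, indent=2, sort_keys=True))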
45
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) lowercase_ = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } lowercase_ = { """b0""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 224, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 240, """dropout_rate""": 0.2, """dw_padding""": [16], }, """b2""": { """hidden_dim""": 1_408, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 260, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 16], }, """b3""": { """hidden_dim""": 1_536, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 300, """dropout_rate""": 0.3, """dw_padding""": [5, 18], }, """b4""": { """hidden_dim""": 1_792, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 380, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_048, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 456, """dropout_rate""": 0.4, """dw_padding""": [13, 27], }, """b6""": { """hidden_dim""": 2_304, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 528, """dropout_rate""": 0.5, """dw_padding""": [31], }, """b7""": { """hidden_dim""": 2_560, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 600, """dropout_rate""": 0.5, """dw_padding""": [18], }, } def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str: lowercase__ = EfficientNetConfig() lowercase__ = CONFIG_MAP[model_name]['hidden_dim'] lowercase__ = CONFIG_MAP[model_name]['width_coef'] lowercase__ = CONFIG_MAP[model_name]['depth_coef'] lowercase__ = CONFIG_MAP[model_name]['image_size'] lowercase__ = CONFIG_MAP[model_name]['dropout_rate'] lowercase__ = CONFIG_MAP[model_name]['dw_padding'] lowercase__ = 'huggingface/label-files' lowercase__ = 'imagenet-1k-id2label.json' lowercase__ = 1000 lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase__ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} return config def __UpperCamelCase () -> Tuple: lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]: lowercase__ = CONFIG_MAP[model_name]['image_size'] lowercase__ = EfficientNetImageProcessor( size={'height': size, 'width': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=_SCREAMING_SNAKE_CASE , ) return preprocessor def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple: lowercase__ 
= [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )] lowercase__ = sorted(set(_SCREAMING_SNAKE_CASE ) ) lowercase__ = len(_SCREAMING_SNAKE_CASE ) lowercase__ = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE , range(_SCREAMING_SNAKE_CASE ) )} lowercase__ = [] rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') ) rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') ) rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') ) rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') ) rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') ) for b in block_names: lowercase__ = block_name_mapping[b] rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") ) rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") ) rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") ) rename_keys.append( (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") ) rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") ) rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") ) rename_keys.append( (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") ) rename_keys.append( (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") ) rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") ) rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") ) rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") ) rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") ) rename_keys.append( (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") ) rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") ) rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") ) rename_keys.append( (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") ) rename_keys.append( (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") ) rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') ) rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') ) rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') ) rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') ) rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') ) lowercase__ = {} for item in rename_keys: if item[0] in original_param_names: lowercase__ = 'efficientnet.' 
+ item[1] lowercase__ = 'classifier.weight' lowercase__ = 'classifier.bias' return key_mapping def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for key, value in tf_params.items(): if "normalization" in key: continue lowercase__ = key_mapping[key] if "_conv" in key and "kernel" in key: lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: lowercase__ = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) ) else: lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: lowercase__ = model_classes[model_name]( include_top=_SCREAMING_SNAKE_CASE , weights='imagenet' , input_tensor=_SCREAMING_SNAKE_CASE , input_shape=_SCREAMING_SNAKE_CASE , pooling=_SCREAMING_SNAKE_CASE , classes=1000 , classifier_activation='softmax' , ) lowercase__ = original_model.trainable_variables lowercase__ = original_model.non_trainable_variables lowercase__ = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: lowercase__ = param.numpy() lowercase__ = list(tf_params.keys() ) # Load HuggingFace model lowercase__ = get_efficientnet_config(_SCREAMING_SNAKE_CASE ) lowercase__ = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval() lowercase__ = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('Converting parameters...' ) lowercase__ = rename_keys(_SCREAMING_SNAKE_CASE ) replace_params(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Initialize preprocessor and preprocess input image lowercase__ = convert_image_processor(_SCREAMING_SNAKE_CASE ) lowercase__ = preprocessor(images=prepare_img() , return_tensors='pt' ) # HF model inference hf_model.eval() with torch.no_grad(): lowercase__ = hf_model(**_SCREAMING_SNAKE_CASE ) lowercase__ = outputs.logits.detach().numpy() # Original model inference lowercase__ = False lowercase__ = CONFIG_MAP[model_name]['image_size'] lowercase__ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) lowercase__ = image.img_to_array(_SCREAMING_SNAKE_CASE ) lowercase__ = np.expand_dims(_SCREAMING_SNAKE_CASE , axis=0 ) lowercase__ = original_model.predict(_SCREAMING_SNAKE_CASE ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same." print('Model outputs match!' 
) if save_model: # Create folder to save model if not os.path.isdir(_SCREAMING_SNAKE_CASE ): os.mkdir(_SCREAMING_SNAKE_CASE ) # Save converted model and image processor hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: # Push model and image processor to hub print(F"""Pushing converted {model_name} to the hub...""" ) lowercase__ = F"""efficientnet-{model_name}""" preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE ) hf_model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") lowercase_ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
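# --- Added illustration, not part of the original script ---
# replace_params above hinges on the kernel-layout difference between the two
# frameworks: Keras stores regular conv kernels as (kh, kw, in, out) while
# PyTorch expects (out, in, kh, kw), hence permute(3, 2, 0, 1); depthwise
# kernels are (kh, kw, channels, multiplier), hence permute(2, 3, 0, 1).
# A toy check of the regular-conv case:
import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 16, 32))  # kh, kw, in_channels, out_channels
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert pt_kernel.shape == (32, 16, 3, 3)  # out_channels, in_channels, kh, kw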
45
1
from scipy.stats import spearmanr import datasets lowercase_ = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ lowercase_ = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ lowercase_ = R"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
 title  = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE (datasets.Metric ):
    def _info( self ):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('float' ),
                    'references': datasets.Value('float' ),
                } ) ,
            reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] ,
        )

    def _compute( self , predictions , references , return_pvalue=False ):
        """simple docstring"""
        results = spearmanr(references , predictions )
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
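# --- Added usage sketch, not part of the original file ---
# Spearman's rho is the Pearson correlation of the ranks, which is what
# scipy's spearmanr computes internally; the check below makes that
# relationship explicit on the docstring's own example:
from scipy.stats import pearsonr, rankdata

preds = [10, 9, 2.5, 6, 4]
refs = [1, 2, 3, 4, 5]
rho, p_value = spearmanr(refs, preds)
assert abs(rho - pearsonr(rankdata(refs), rankdata(preds))[0]) < 1e-12
print(rho)  # -0.7, matching the example in the usage docstring above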
45
import argparse
import json
import subprocess


def get_runner_status(target_runners , token ) -> None:
    offline_runners = []
    cmd = (
        f"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode('utf-8' )
    status = json.loads(o )
    runners = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )

    # save the result so we can report them on Slack
    with open('offline_runners.txt' , 'w' ) as fp:
        fp.write(json.dumps(offline_runners ) )

    if len(offline_runners ) > 0:
        failed = '\n'.join([x['name'] for x in offline_runners] )
        raise ValueError(f"""The following runners are offline:\n{failed}""" )


if __name__ == "__main__":

    def list_str(values ) -> list:
        return values.split(',' )

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--target_runners""",
        default=None,
        type=list_str,
        required=True,
        help="""Comma-separated list of runners to check status.""",
    )
    parser.add_argument(
        """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
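# --- Added illustration, not part of the original script ---
# The GitHub Actions endpoint queried above answers with a JSON object whose
# "runners" list carries at least "name" and "status" per runner, which is all
# the filter relies on. A hypothetical payload for reference:
example_status = {
    "runners": [
        {"name": "docker-gpu", "status": "online"},
        {"name": "docker-gpu-multi", "status": "offline"},
    ]
}
offline = [r for r in example_status["runners"] if r["status"] == "offline"]
assert [r["name"] for r in offline] == ["docker-gpu-multi"]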
45
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer lowercase_ = logging.get_logger(__name__) lowercase_ = {"""vocab_file""": """vocab.txt"""} lowercase_ = { """vocab_file""": { """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""", """YituTech/conv-bert-medium-small""": ( """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt""" ), """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""", } } lowercase_ = { """YituTech/conv-bert-base""": 512, """YituTech/conv-bert-medium-small""": 512, """YituTech/conv-bert-small""": 512, } lowercase_ = { """YituTech/conv-bert-base""": {"""do_lower_case""": True}, """YituTech/conv-bert-medium-small""": {"""do_lower_case""": True}, """YituTech/conv-bert-small""": {"""do_lower_case""": True}, } class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES _UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Any = PRETRAINED_INIT_CONFIGURATION _UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Any = ConvBertTokenizer def __init__( self : Any , a : int=None , a : Union[str, Any]=None , a : List[Any]=True , a : Optional[Any]="[UNK]" , a : Optional[Any]="[SEP]" , a : int="[PAD]" , a : Any="[CLS]" , a : List[str]="[MASK]" , a : Optional[Any]=True , a : Optional[int]=None , **a : Optional[Any] , )-> Dict: """simple docstring""" super().__init__( a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , ) lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , a ) != do_lower_case or normalizer_state.get('strip_accents' , a ) != strip_accents or normalizer_state.get('handle_chinese_chars' , a ) != tokenize_chinese_chars ): lowercase__ = getattr(a , normalizer_state.pop('type' ) ) lowercase__ = do_lower_case lowercase__ = strip_accents lowercase__ = tokenize_chinese_chars lowercase__ = normalizer_class(**a ) lowercase__ = do_lower_case def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Any , a : List[str]=None )-> str: """simple docstring""" lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : List[int] , a : Optional[List[int]] = None )-> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : str , a : Optional[str] = None )-> Tuple[str]: """simple docstring""" lowercase__ = self._tokenizer.model.save(a , name=a ) return tuple(a )
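# --- Added illustration, not part of the original file ---
# The two sequence helpers above (upstream: build_inputs_with_special_tokens
# and create_token_type_ids_from_sequences) follow the BERT convention:
#   single sequence -> [CLS] A [SEP]
#   sequence pair   -> [CLS] A [SEP] B [SEP]
# with the first segment marked by 0s and the second by 1s in the token type
# ids. With hypothetical ids cls=101 and sep=102:
cls_id, sep_id = 101, 102
a_ids, b_ids = [7, 8], [9]
pair = [cls_id] + a_ids + [sep_id] + b_ids + [sep_id]
type_ids = [0] * (len(a_ids) + 2) + [1] * (len(b_ids) + 1)
assert pair == [101, 7, 8, 102, 9, 102]
assert type_ids == [0, 0, 0, 0, 1, 1]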
45
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class SCREAMING_SNAKE_CASE (ProcessorMixin ):
    feature_extractor_class = 'ClapFeatureExtractor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__( self , feature_extractor , tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )

    def __call__( self , text=None , audios=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        sampling_rate = kwargs.pop('sampling_rate' , None )

        if text is None and audios is None:
            raise ValueError('You have to specify either text or audios. Both cannot be none.' )

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if audios is not None:
            audio_features = self.feature_extractor(
                audios , sampling_rate=sampling_rate , return_tensors=return_tensors , **kwargs )

        if text is not None and audios is not None:
            encoding['input_features'] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
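# --- Added usage sketch, not part of the original file ---
# A minimal sketch of calling the processor above with both modalities,
# assuming the public laion/clap-htsat-unfused checkpoint. When text and
# audio are both given, the audio features are merged into the tokenizer
# encoding under "input_features":
import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
audio = np.random.randn(48_000)  # one second of synthetic audio at 48 kHz
inputs = processor(text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")
assert {"input_ids", "attention_mask", "input_features"} <= set(inputs.keys())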
45
1
import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings lowercase_ = R""" [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: title_sep (`str`, *optional*, defaults to `\" / \"`): Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`]. doc_sep (`str`, *optional*, defaults to `\" // \"`): Separator inserted between the text of the retrieved document and the original input when calling [`RagRetriever`]. n_docs (`int`, *optional*, defaults to 5): Number of documents to retrieve. max_combined_length (`int`, *optional*, defaults to 300): Max length of contextualized input returned by [`~RagRetriever.__call__`]. retrieval_vector_size (`int`, *optional*, defaults to 768): Dimensionality of the document embeddings indexed by [`RagRetriever`]. retrieval_batch_size (`int`, *optional*, defaults to 8): Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated [`RagRetriever`]. dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`): A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids using `datasets.list_datasets()`). dataset_split (`str`, *optional*, defaults to `\"train\"`) Which split of the `dataset` to load. index_name (`str`, *optional*, defaults to `\"compressed\"`) The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and `\"compressed\"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) Whether to load a \"dummy\" variant of the dataset specified by `dataset`. label_smoothing (`float`, *optional*, defaults to 0.0): Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label smoothing is performed. do_marginalize (`bool`, *optional*, defaults to `False`): If `True`, the logits are marginalized over all documents by making use of `torch.nn.functional.log_softmax`. reduce_loss (`bool`, *optional*, defaults to `False`): Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation. do_deduplication (`bool`, *optional*, defaults to `True`): Whether or not to deduplicate the generations from different context documents for a given input. Has to be set to `False` if used while training with distributed backend. exclude_bos_score (`bool`, *optional*, defaults to `False`): Whether or not to disregard the BOS token when computing the loss. output_retrieved(`bool`, *optional*, defaults to `False`): If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and `context_attention_mask` are returned. See returned tensors for more detail. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. 
""" @add_start_docstrings(UpperCAmelCase ) class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Union[str, Any] = 'rag' _UpperCamelCase : Optional[int] = True def __init__( self : List[Any] , a : List[str]=None , a : Dict=True , a : Any=None , a : List[Any]=None , a : Union[str, Any]=None , a : str=None , a : Tuple=None , a : List[Any]=" / " , a : int=" // " , a : str=5 , a : List[Any]=300 , a : Dict=768 , a : int=8 , a : str="wiki_dpr" , a : List[Any]="train" , a : str="compressed" , a : Tuple=None , a : Union[str, Any]=None , a : Optional[int]=False , a : Dict=False , a : List[Any]=0.0 , a : int=True , a : Optional[int]=False , a : str=False , a : str=False , a : List[str]=True , a : Union[str, Any]=None , **a : List[Any] , )-> List[Any]: """simple docstring""" super().__init__( bos_token_id=a , pad_token_id=a , eos_token_id=a , decoder_start_token_id=a , forced_eos_token_id=a , is_encoder_decoder=a , prefix=a , vocab_size=a , **a , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" lowercase__ = kwargs.pop('question_encoder' ) lowercase__ = question_encoder_config.pop('model_type' ) lowercase__ = kwargs.pop('generator' ) lowercase__ = decoder_config.pop('model_type' ) from ..auto.configuration_auto import AutoConfig lowercase__ = AutoConfig.for_model(a , **a ) lowercase__ = AutoConfig.for_model(a , **a ) lowercase__ = reduce_loss lowercase__ = label_smoothing lowercase__ = exclude_bos_score lowercase__ = do_marginalize lowercase__ = title_sep lowercase__ = doc_sep lowercase__ = n_docs lowercase__ = max_combined_length lowercase__ = dataset lowercase__ = dataset_split lowercase__ = index_name lowercase__ = retrieval_vector_size lowercase__ = retrieval_batch_size lowercase__ = passages_path lowercase__ = index_path lowercase__ = use_dummy_dataset lowercase__ = output_retrieved lowercase__ = do_deduplication lowercase__ = use_cache if self.forced_eos_token_id is None: lowercase__ = getattr(self.generator , 'forced_eos_token_id' , a ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , a : PretrainedConfig , a : PretrainedConfig , **a : Any )-> PretrainedConfig: """simple docstring""" return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **a ) def SCREAMING_SNAKE_CASE_ ( self : Dict )-> str: """simple docstring""" lowercase__ = copy.deepcopy(self.__dict__ ) lowercase__ = self.question_encoder.to_dict() lowercase__ = self.generator.to_dict() lowercase__ = self.__class__.model_type return output
45
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: lowercase_ = None lowercase_ = logging.get_logger(__name__) lowercase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowercase_ = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json""" ), }, } lowercase_ = { """moussaKam/mbarthez""": 1_024, """moussaKam/barthez""": 1_024, """moussaKam/barthez-orangesum-title""": 1_024, } lowercase_ = """▁""" class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Dict = VOCAB_FILES_NAMES _UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Optional[Any] = ['input_ids', 'attention_mask'] _UpperCamelCase : int = BarthezTokenizer def __init__( self : List[Any] , a : Union[str, Any]=None , a : Optional[Any]=None , a : Dict="<s>" , a : Union[str, Any]="</s>" , a : List[str]="</s>" , a : Optional[Any]="<s>" , a : int="<unk>" , a : str="<pad>" , a : Optional[int]="<mask>" , **a : Union[str, Any] , )-> Tuple: """simple docstring""" lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token super().__init__( a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , ) lowercase__ = vocab_file lowercase__ = False if not self.vocab_file else True def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase__ = [self.cls_token_id] lowercase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : Optional[str] = None )-> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' 
) if not os.path.isdir(a ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ = os.path.join( a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a ): copyfile(self.vocab_file , a ) return (out_vocab_file,)
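# --- Added illustration, not part of the original file ---
# BARThez follows the RoBERTa/CamemBERT special-token scheme encoded above:
#   single sequence -> <s> A </s>
#   sequence pair   -> <s> A </s></s> B </s>
# and, unlike BERT, the token type ids are all zeros for both segments.
# With hypothetical ids cls=0 (<s>) and sep=2 (</s>):
cls_id, sep_id = 0, 2
a_ids, b_ids = [7, 8], [9]
pair = [cls_id] + a_ids + [sep_id] + [sep_id] + b_ids + [sep_id]
assert pair == [0, 7, 8, 2, 2, 9, 2]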
45
1
from math import factorial


def combinations(n , k ) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError('Please enter positive integers for n and k where n >= k' )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))


if __name__ == "__main__":
    print(
        """The number of five-card hands possible from a standard""",
        f'''fifty-two card deck is: {combinations(52, 5)}\n''',
    )

    print(
        """If a class of 40 students must be arranged into groups of""",
        f'''4 for group projects, there are {combinations(40, 4)} ways''',
        """to arrange them.\n""",
    )

    print(
        """If 10 teams are competing in a Formula One race, there""",
        f'''are {combinations(10, 3)} ways that first, second and''',
        """third place can be awarded.""",
    )
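# --- Added sanity check, not part of the original file ---
# The formula above is n! / (k! * (n - k)!); Python 3.8+ ships math.comb,
# which computes the same value without the intermediate factorials and
# doubles as a cheap cross-check for the first example printed above:
from math import comb

assert combinations(52, 5) == comb(52, 5) == 2_598_960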
45
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : List[Any] = StableDiffusionSAGPipeline _UpperCamelCase : str = TEXT_TO_IMAGE_PARAMS _UpperCamelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCamelCase : Union[str, Any] = False def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict: """simple docstring""" torch.manual_seed(0 ) lowercase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) lowercase__ = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , ) torch.manual_seed(0 ) lowercase__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) lowercase__ = CLIPTextModel(a ) lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowercase__ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[int] , a : Any=0 )-> Union[str, Any]: """simple docstring""" if str(a ).startswith('mps' ): lowercase__ = torch.manual_seed(a ) else: lowercase__ = torch.Generator(device=a ).manual_seed(a ) lowercase__ = { 'prompt': '.', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 1.0, 'sag_scale': 1.0, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class SCREAMING_SNAKE_CASE (unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self : str )-> int: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]: """simple docstring""" lowercase__ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ) lowercase__ = sag_pipe.to(a ) sag_pipe.set_progress_bar_config(disable=a ) lowercase__ = '.' 
lowercase__ = torch.manual_seed(0 ) lowercase__ = sag_pipe( [prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' ) lowercase__ = output.images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]: """simple docstring""" lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) lowercase__ = sag_pipe.to(a ) sag_pipe.set_progress_bar_config(disable=a ) lowercase__ = '.' lowercase__ = torch.manual_seed(0 ) lowercase__ = sag_pipe( [prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' ) lowercase__ = output.images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]: """simple docstring""" lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) lowercase__ = sag_pipe.to(a ) sag_pipe.set_progress_bar_config(disable=a ) lowercase__ = '.' lowercase__ = torch.manual_seed(0 ) lowercase__ = sag_pipe( [prompt] , width=768 , height=512 , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , ) lowercase__ = output.images assert image.shape == (1, 512, 768, 3)
45
1
import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , config_file , pytorch_dump_path , base_model ) -> None:
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
45
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/deit-base-distilled-patch16-224""": ( """https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json""" ), # See all DeiT models at https://huggingface.co/models?filter=deit } class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Any = 'deit' def __init__( self : Any , a : Union[str, Any]=768 , a : Optional[Any]=12 , a : Union[str, Any]=12 , a : Optional[int]=3_072 , a : Optional[int]="gelu" , a : Optional[Any]=0.0 , a : List[Any]=0.0 , a : int=0.02 , a : List[str]=1E-1_2 , a : Optional[int]=224 , a : Tuple=16 , a : List[Any]=3 , a : List[str]=True , a : Any=16 , **a : Union[str, Any] , )-> int: """simple docstring""" super().__init__(**a ) lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = qkv_bias lowercase__ = encoder_stride class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : List[Any] = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE_ ( self : int )-> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def SCREAMING_SNAKE_CASE_ ( self : Any )-> float: """simple docstring""" return 1E-4
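# --- Added illustration, not part of the original file ---
# With the defaults wired in above (image_size=224, patch_size=16) a DeiT
# encoder sees (224 // 16) ** 2 = 196 patch tokens, on top of which DeiT adds
# a class token and a distillation token. The ONNX inputs declared above
# (batch, num_channels, height, width) describe the matching pixel_values
# tensor, e.g. shape (1, 3, 224, 224).
image_size, patch_size = 224, 16
assert (image_size // patch_size) ** 2 == 196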
45
1
from __future__ import annotations import copy import inspect import unittest import numpy as np from transformers import is_tf_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class SCREAMING_SNAKE_CASE : def __init__( self : Dict , a : Union[str, Any] , a : int=2 , a : Optional[int]=3 , a : Optional[int]=4 , a : Dict=2 , a : List[Any]=7 , a : Optional[int]=True , a : Dict=True , a : Optional[int]=True , a : List[Any]=True , a : List[Any]=99 , a : Optional[Any]=36 , a : int=2 , a : Optional[int]=4 , a : Tuple=37 , a : Any="gelu" , a : Optional[int]=0.1 , a : Any=0.1 , a : Dict=512 , a : Optional[Any]=16 , a : List[str]=2 , a : List[Any]=0.02 , a : Any=6 , a : Tuple=6 , a : List[Any]=3 , a : Tuple=4 , a : Tuple=None , a : List[str]=1_000 , )-> List[str]: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = patch_size lowercase__ = is_training lowercase__ = use_input_mask lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = coordinate_size lowercase__ = shape_size lowercase__ = num_labels lowercase__ = num_choices lowercase__ = scope lowercase__ = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) lowercase__ = text_seq_length lowercase__ = (image_size // patch_size) ** 2 + 1 lowercase__ = self.text_seq_length + self.image_seq_length def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Optional[int]: """simple docstring""" lowercase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) lowercase__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) lowercase__ = bbox.numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: lowercase__ = bbox[i, j, 3] lowercase__ = bbox[i, j, 1] lowercase__ = tmp_coordinate if bbox[i, j, 2] < bbox[i, j, 0]: lowercase__ = bbox[i, j, 2] lowercase__ = bbox[i, j, 0] lowercase__ = tmp_coordinate lowercase__ = tf.constant(a ) lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_input_mask: lowercase__ = random_attention_mask([self.batch_size, 
        self.text_seq_length] )
        lowercase__ = None
        if self.use_token_type_ids:
            lowercase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        lowercase__ = None
        lowercase__ = None
        if self.use_labels:
            lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        lowercase__ = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def SCREAMING_SNAKE_CASE_ ( self : str , a : Optional[Any] , a : Optional[Any] , a : Optional[Any] , a : Optional[Any] , a : Union[str, Any] , a : Any )-> Tuple:
        """simple docstring"""
        lowercase__ = TFLayoutLMvaModel(config=a )
        # text + image
        lowercase__ = model(a , pixel_values=a , training=a )
        lowercase__ = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , training=a , )
        lowercase__ = model(a , bbox=a , pixel_values=a , training=a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        lowercase__ = model(a , training=a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        lowercase__ = model({'pixel_values': pixel_values} , training=a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Union[str, Any] , a : List[Any] , a : int , a : str , a : str , a : Any , a : Optional[Any] )-> Tuple:
        """simple docstring"""
        lowercase__ = self.num_labels
        lowercase__ = TFLayoutLMvaForSequenceClassification(config=a )
        lowercase__ = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def SCREAMING_SNAKE_CASE_ ( self : int , a : Dict , a : Tuple , a : str , a : List[Any] , a : str , a : Any , a : Union[str, Any] )-> Tuple:
        """simple docstring"""
        lowercase__ = self.num_labels
        lowercase__ = TFLayoutLMvaForTokenClassification(config=a )
        lowercase__ = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def SCREAMING_SNAKE_CASE_ ( self : Any , a : List[Any] , a : Tuple , a : Union[str, Any] , a : Tuple , a : List[str] , a : Union[str, Any] , a : Union[str, Any] )-> Union[str, Any]:
        """simple docstring"""
        lowercase__ = 2
        lowercase__ = TFLayoutLMvaForQuestionAnswering(config=a )
        lowercase__ = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , training=a , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]:
        """simple docstring"""
        lowercase__ = self.prepare_config_and_inputs()
        ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
        lowercase__ = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict


@require_tf
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    _UpperCamelCase : Tuple = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    _UpperCamelCase : List[Any] = (
        {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    _UpperCamelCase : Dict = False
    _UpperCamelCase : str = False
    _UpperCamelCase : str = False

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : Optional[int] , a : Optional[int] , a : Union[str, Any] , a : str , a : List[Any] )-> List[Any]:
        """simple docstring"""
        return True

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : str , a : Tuple , a : Optional[Any]=False )-> dict:
        """simple docstring"""
        lowercase__ = copy.deepcopy(a )
        if model_class in get_values(a ):
            lowercase__ = {
                k: tf.tile(tf.expand_dims(a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(a , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(a ):
                lowercase__ = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                lowercase__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                lowercase__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                lowercase__ = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                lowercase__ = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict

    def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Tuple:
        """simple docstring"""
        lowercase__ = TFLayoutLMvaModelTester(self )
        lowercase__ = ConfigTester(self , config_class=a , hidden_size=37 )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Union[str, Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Union[str, Any]:
        """simple docstring"""
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ = model_class(a )
            if getattr(a , 'hf_compute_loss' , a ):
                # The number of elements in the loss should be the same as the number of elements in the label
                lowercase__ = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                lowercase__ = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=a )[0]
                ]
                lowercase__ = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                lowercase__ = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                lowercase__ = prepared_for_class.pop('input_ids' )
                lowercase__ = model(a , **a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                lowercase__ = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                lowercase__ = prepared_for_class.pop('input_ids' )
                if "labels" in prepared_for_class:
                    lowercase__ = prepared_for_class['labels'].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        lowercase__ = -100
                        lowercase__ = tf.convert_to_tensor(a )
                        lowercase__ = model(a , **a )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                lowercase__ = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                lowercase__ = model(a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                lowercase__ = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                # Get keys that were added with the _prepare_for_class function
                lowercase__ = prepared_for_class.keys() - inputs_dict.keys()
                lowercase__ = inspect.signature(model.call ).parameters
                lowercase__ = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                lowercase__ = {0: 'input_ids'}
                for label_key in label_keys:
                    lowercase__ = signature_names.index(a )
                    lowercase__ = label_key
                lowercase__ = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                lowercase__ = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    lowercase__ = prepared_for_class[value]
                lowercase__ = tuple(a )
                # Send to model
                lowercase__ = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Tuple:
        """simple docstring"""
        ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) ,) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(a , a , a , a , a , a )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Union[str, Any]:
        """simple docstring"""
        ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) ,) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowercase__ = type
            self.model_tester.create_and_check_model(a , a , a , a , a , a )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
        """simple docstring"""
        ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) ,) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            a , a , a , a , a , a , a )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[str]:
        """simple docstring"""
        ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) ,) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            a , a , a , a , a , a , a )

    def SCREAMING_SNAKE_CASE_ ( self : int )-> Optional[Any]:
        """simple docstring"""
        ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) ,) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            a , a , a , a , a , a , a )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : str )-> int:
        """simple docstring"""
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = TFLayoutLMvaModel.from_pretrained(a )
            self.assertIsNotNone(a )


def __UpperCamelCase () -> Optional[Any]:
    lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> List[Any]:
        """simple docstring"""
        return LayoutLMvaImageProcessor(apply_ocr=a ) if is_vision_available() else None

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]:
        """simple docstring"""
        lowercase__ = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
        lowercase__ = self.default_image_processor
        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=a , return_tensors='tf' ).pixel_values
        lowercase__ = tf.constant([[1, 2]] )
        lowercase__ = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        lowercase__ = model(input_ids=a , bbox=a , pixel_values=a , training=a )
        # verify the logits
        lowercase__ = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , a )
        lowercase__ = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) )
45
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 ) -> List[Any]:
    lowercase__ = None
    if token is not None:
        lowercase__ = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""}
    # The id of a workflow (not of a workflow run)
    lowercase__ = '636036'
    lowercase__ = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
    lowercase__ = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
    return result["workflow_runs"]


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
    lowercase__ = get_daily_ci_runs(_SCREAMING_SNAKE_CASE )
    lowercase__ = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            lowercase__ = workflow_run['id']
            break
    return workflow_run_id


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
    lowercase__ = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE )
    if workflow_run_id is not None:
        lowercase__ = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                lowercase__ = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
    get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    lowercase__ = {}
    for artifact_name in artifact_names:
        lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" )
        if os.path.isfile(_SCREAMING_SNAKE_CASE ):
            lowercase__ = {}
            with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
                        # read the file
                        with z.open(_SCREAMING_SNAKE_CASE ) as f:
                            lowercase__ = f.read().decode('UTF-8' )
    return results
45
1
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    """snap-research/efficientformer-l1-300""": (
        """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
    ),
}


class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    _UpperCamelCase : Any = 'efficientformer'

    def __init__( self : Any , a : List[int] = [3, 2, 6, 4] , a : List[int] = [48, 96, 224, 448] , a : List[bool] = [True, True, True, True] , a : int = 448 , a : int = 32 , a : int = 4 , a : int = 7 , a : int = 5 , a : int = 8 , a : int = 4 , a : float = 0.0 , a : int = 16 , a : int = 3 , a : int = 3 , a : int = 3 , a : int = 2 , a : int = 1 , a : float = 0.0 , a : int = 1 , a : bool = True , a : bool = True , a : float = 1E-5 , a : str = "gelu" , a : float = 0.02 , a : float = 1E-1_2 , a : int = 224 , a : float = 1E-0_5 , **a : Tuple , )-> None:
        """simple docstring"""
        super().__init__(**a )
        lowercase__ = hidden_act
        lowercase__ = hidden_dropout_prob
        lowercase__ = hidden_sizes
        lowercase__ = num_hidden_layers
        lowercase__ = num_attention_heads
        lowercase__ = initializer_range
        lowercase__ = layer_norm_eps
        lowercase__ = patch_size
        lowercase__ = num_channels
        lowercase__ = depths
        lowercase__ = mlp_expansion_ratio
        lowercase__ = downsamples
        lowercase__ = dim
        lowercase__ = key_dim
        lowercase__ = attention_ratio
        lowercase__ = resolution
        lowercase__ = pool_size
        lowercase__ = downsample_patch_size
        lowercase__ = downsample_stride
        lowercase__ = downsample_pad
        lowercase__ = drop_path_rate
        lowercase__ = num_metaad_blocks
        lowercase__ = distillation
        lowercase__ = use_layer_scale
        lowercase__ = layer_scale_init_value
        lowercase__ = image_size
        lowercase__ = batch_norm_eps
45
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


lowercase_ = False


class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    pass


@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]:
        """simple docstring"""
        lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
        pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowercase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = pipe.dual_guided(
            prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(a )
            lowercase__ = VersatileDiffusionPipeline.from_pretrained(a , torch_dtype=torch.floataa )
            pipe.to(a )
            pipe.set_progress_bar_config(disable=a )
        lowercase__ = generator.manual_seed(0 )
        lowercase__ = pipe.dual_guided(
            prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]:
        """simple docstring"""
        lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
        pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowercase__ = 'cyberpunk 2077'
        lowercase__ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = pipe.dual_guided(
            prompt=a , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
        lowercase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        lowercase__ = 'A painting of a squirrel eating a burger '
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = pipe.text_to_image(
            prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
        lowercase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        lowercase__ = pipe.image_variation(a , generator=a , output_type='numpy' ).images
        lowercase__ = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
45
1
import unittest

from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


lowercase_ = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")


@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
    _UpperCamelCase : List[Any] = GPTSwaTokenizer
    _UpperCamelCase : str = False
    _UpperCamelCase : List[str] = True
    _UpperCamelCase : List[str] = False

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Optional[int]:
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowercase__ = GPTSwaTokenizer(a , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
        tokenizer.save_pretrained(self.tmpdirname )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str )-> Dict:
        """simple docstring"""
        lowercase__ = 'This is a test'
        lowercase__ = 'This is a test'
        return input_text, output_text

    def SCREAMING_SNAKE_CASE_ ( self : str )-> int:
        """simple docstring"""
        lowercase__ = '<s>'
        lowercase__ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Dict:
        """simple docstring"""
        lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<unk>' )
        self.assertEqual(vocab_keys[1] , '<s>' )
        self.assertEqual(vocab_keys[-1] , 'j' )
        self.assertEqual(len(a ) , 2_000 )

    def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Optional[Any]:
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 2_000 )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> str:
        """simple docstring"""
        lowercase__ = GPTSwaTokenizer(a )
        lowercase__ = tokenizer.tokenize('This is a test' )
        self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [465, 287, 265, 631, 842] )
        lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        # fmt: off
        self.assertListEqual(
            a , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
        # fmt: on
        lowercase__ = tokenizer.convert_tokens_to_ids(a )
        self.assertListEqual(
            a , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
        lowercase__ = tokenizer.convert_ids_to_tokens(a )
        # fmt: off
        self.assertListEqual(
            a , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
        # fmt: on

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
        """simple docstring"""
        lowercase__ = GPTSwaTokenizer(a )
        lowercase__ = ['This is a test', 'I was born in 92000, and this is falsé.']
        lowercase__ = [
            [465, 287, 265, 631, 842],
            [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(a , a ):
            self.assertListEqual(tokenizer.encode_fast(a ) , a )
        # Test that decode_fast returns the input text
        for text, token_ids in zip(a , a ):
            self.assertEqual(tokenizer.decode_fast(a ) , a )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
        """simple docstring"""
        lowercase__ = [
            '<|python|>def fibonacci(n)\n    if n < 0:\n        print(\'Incorrect input\')',
            'Hey there, how are you doing this fine day?',
            'This is a text with a trailing spaces followed by a dot     .',
            'Häj sväjs lillebrör! =)',
            'Det är inget fel på Mr. Cool',
        ]
        # fmt: off
        lowercase__ = {'input_ids': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a , model_name='AI-Sweden/gpt-sw3-126m' , sequences=a , )
45
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bool:
    if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(_SCREAMING_SNAKE_CASE ) == 0:
        raise ValueError('Input list must be a non empty list' )
    if len(_SCREAMING_SNAKE_CASE ) == 1:
        return True
    lowercase__ = series[1] - series[0]
    for index in range(len(_SCREAMING_SNAKE_CASE ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> float:
    if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(_SCREAMING_SNAKE_CASE ) == 0:
        raise ValueError('Input list must be a non empty list' )
    lowercase__ = 0
    for val in series:
        answer += val
    return answer / len(_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
45
1
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer


class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    _UpperCamelCase : Dict = 'Wav2Vec2FeatureExtractor'
    _UpperCamelCase : Any = 'AutoTokenizer'

    def __init__( self : str , a : Dict , a : List[str] )-> Tuple:
        """simple docstring"""
        super().__init__(a , a )
        lowercase__ = self.feature_extractor
        lowercase__ = False

    @classmethod
    def SCREAMING_SNAKE_CASE_ ( cls : int , a : List[Any] , **a : int )-> Dict:
        """simple docstring"""
        try:
            return super().from_pretrained(a , **a )
        except OSError:
            warnings.warn(
                f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
                ' include a `tokenizer_class` attribute is deprecated and will be '
                'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
                ' attribute to either your `config.json` or `tokenizer_config.json` '
                'file to suppress this warning: ' , a , )
            lowercase__ = WavaVecaFeatureExtractor.from_pretrained(a , **a )
            lowercase__ = WavaVecaCTCTokenizer.from_pretrained(a , **a )
            return cls(feature_extractor=a , tokenizer=a )

    def __call__( self : Dict , *a : Optional[Any] , **a : Optional[Any] )-> List[Any]:
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*a , **a )
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
            lowercase__ = kwargs.pop('raw_speech' )
        else:
            lowercase__ = kwargs.pop('audio' , a )
        lowercase__ = kwargs.pop('sampling_rate' , a )
        lowercase__ = kwargs.pop('text' , a )
        if len(a ) > 0:
            lowercase__ = args[0]
            lowercase__ = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.' )
        if audio is not None:
            lowercase__ = self.feature_extractor(a , *a , sampling_rate=a , **a )
        if text is not None:
            lowercase__ = self.tokenizer(a , **a )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            lowercase__ = encodings['input_ids']
            return inputs

    def SCREAMING_SNAKE_CASE_ ( self : Any , *a : Tuple , **a : Any )-> Optional[int]:
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor.pad(*a , **a )
        lowercase__ = kwargs.pop('input_features' , a )
        lowercase__ = kwargs.pop('labels' , a )
        if len(a ) > 0:
            lowercase__ = args[0]
            lowercase__ = args[1:]
        if input_features is not None:
            lowercase__ = self.feature_extractor.pad(a , *a , **a )
        if labels is not None:
            lowercase__ = self.tokenizer.pad(a , **a )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            lowercase__ = labels['input_ids']
            return input_features

    def SCREAMING_SNAKE_CASE_ ( self : Any , *a : Optional[Any] , **a : Optional[Any] )-> Any:
        """simple docstring"""
        return self.tokenizer.batch_decode(*a , **a )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , *a : str , **a : str )-> int:
        """simple docstring"""
        return self.tokenizer.decode(*a , **a )

    @contextmanager
    def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Any:
        """simple docstring"""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        lowercase__ = True
        lowercase__ = self.tokenizer
        yield
        lowercase__ = self.feature_extractor
        lowercase__ = False
45
from __future__ import annotations

import math
from collections.abc import Callable


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 100 , ) -> float:
    lowercase__ = x_start
    lowercase__ = fnc(_SCREAMING_SNAKE_CASE )
    lowercase__ = 0.0
    for _ in range(_SCREAMING_SNAKE_CASE ):
        # Approximates curve as a sequence of linear lines and sums their length
        lowercase__ = (x_end - x_start) / steps + xa
        lowercase__ = fnc(_SCREAMING_SNAKE_CASE )
        length += math.hypot(xa - xa , fxa - fxa )
        # Increment step
        lowercase__ = xa
        lowercase__ = fxa
    return length


if __name__ == "__main__":

    def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
        return math.sin(10 * x )

    print("""f(x) = sin(10 * x)""")
    print("""The length of the curve from x = -10 to x = 10 is:""")
    lowercase_ = 10
    while i <= 100_000:
        print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
        i *= 10
45
1
def __UpperCamelCase () -> List[str]:
    lowercase__ = 0
    for i in range(1 , 1001 ):
        total += i**i
    return str(_SCREAMING_SNAKE_CASE )[-10:]


if __name__ == "__main__":
    print(solution())
45
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


lowercase_ = {
    """configuration_squeezebert""": [
        """SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """SqueezeBertConfig""",
        """SqueezeBertOnnxConfig""",
    ],
    """tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = ["""SqueezeBertTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        """SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SqueezeBertForMaskedLM""",
        """SqueezeBertForMultipleChoice""",
        """SqueezeBertForQuestionAnswering""",
        """SqueezeBertForSequenceClassification""",
        """SqueezeBertForTokenClassification""",
        """SqueezeBertModel""",
        """SqueezeBertModule""",
        """SqueezeBertPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
45
1
import math
import unittest


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bool:
    assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    def SCREAMING_SNAKE_CASE_ ( self : int )-> Union[str, Any]:
        """simple docstring"""
        self.assertTrue(is_prime(2 ) )
        self.assertTrue(is_prime(3 ) )
        self.assertTrue(is_prime(5 ) )
        self.assertTrue(is_prime(7 ) )
        self.assertTrue(is_prime(11 ) )
        self.assertTrue(is_prime(13 ) )
        self.assertTrue(is_prime(17 ) )
        self.assertTrue(is_prime(19 ) )
        self.assertTrue(is_prime(23 ) )
        self.assertTrue(is_prime(29 ) )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[str]:
        """simple docstring"""
        with self.assertRaises(a ):
            is_prime(-19 )
        self.assertFalse(
            is_prime(0 ) , 'Zero doesn\'t have any positive factors, primes must have exactly two.' , )
        self.assertFalse(
            is_prime(1 ) , 'One only has 1 positive factor, primes must have exactly two.' , )
        self.assertFalse(is_prime(2 * 2 ) )
        self.assertFalse(is_prime(2 * 3 ) )
        self.assertFalse(is_prime(3 * 3 ) )
        self.assertFalse(is_prime(3 * 5 ) )
        self.assertFalse(is_prime(3 * 5 * 7 ) )


if __name__ == "__main__":
    unittest.main()
45
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int:
    lowercase__ = [[0] * 3 for _ in range(length + 1 )]
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][tile_length - 2] + 1
                )
    return sum(different_colour_ways_number[length] )


if __name__ == "__main__":
    print(f'''{solution() = }''')
45
1
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
    if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        lowercase__ = np.full((len(_SCREAMING_SNAKE_CASE ), sequence_length, 2) , _SCREAMING_SNAKE_CASE )
    else:
        lowercase__ = np.full((len(_SCREAMING_SNAKE_CASE ), sequence_length) , _SCREAMING_SNAKE_CASE )
    for i, tensor in enumerate(_SCREAMING_SNAKE_CASE ):
        if padding_side == "right":
            if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                lowercase__ = tensor[:sequence_length]
            else:
                lowercase__ = tensor[:sequence_length]
        else:
            if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                lowercase__ = tensor[:sequence_length]
            else:
                lowercase__ = tensor[:sequence_length]
    return out_tensor.tolist()


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
    lowercase__ = ord(_SCREAMING_SNAKE_CASE )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    lowercase__ = unicodedata.category(_SCREAMING_SNAKE_CASE )
    if cat.startswith('P' ):
        return True
    return False


@dataclass
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    _UpperCamelCase : PreTrainedTokenizerBase
    _UpperCamelCase : Union[bool, str, PaddingStrategy] = True
    _UpperCamelCase : Optional[int] = None
    _UpperCamelCase : Optional[int] = None
    _UpperCamelCase : int = -1_00
    _UpperCamelCase : str = "pt"

    def SCREAMING_SNAKE_CASE_ ( self : str , a : List[str] )-> str:
        """simple docstring"""
        import torch

        lowercase__ = 'label' if 'label' in features[0].keys() else 'labels'
        lowercase__ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        lowercase__ = self.tokenizer.pad(
            a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' if labels is None else None , )
        if labels is None:
            return batch
        lowercase__ = torch.tensor(batch['entity_ids'] ).shape[1]
        lowercase__ = self.tokenizer.padding_side
        if padding_side == "right":
            lowercase__ = [
                list(a ) + [self.label_pad_token_id] * (sequence_length - len(a )) for label in labels
            ]
        else:
            lowercase__ = [
                [self.label_pad_token_id] * (sequence_length - len(a )) + list(a ) for label in labels
            ]
        lowercase__ = [feature['ner_tags'] for feature in features]
        lowercase__ = padding_tensor(a , -1 , a , a )
        lowercase__ = [feature['original_entity_spans'] for feature in features]
        lowercase__ = padding_tensor(a , (-1, -1) , a , a )
        lowercase__ = {k: torch.tensor(a , dtype=torch.intaa ) for k, v in batch.items()}
        return batch
45
import numpy as np
import torch
import tqdm

from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    def __init__( self : Optional[Any] , a : UNetaDModel , a : UNetaDModel , a : DDPMScheduler , a : Any , )-> Dict:
        """simple docstring"""
        super().__init__()
        lowercase__ = value_function
        lowercase__ = unet
        lowercase__ = scheduler
        lowercase__ = env
        lowercase__ = env.get_dataset()
        lowercase__ = {}
        for key in self.data.keys():
            try:
                lowercase__ = self.data[key].mean()
            except:  # noqa: E722
                pass
        lowercase__ = {}
        for key in self.data.keys():
            try:
                lowercase__ = self.data[key].std()
            except:  # noqa: E722
                pass
        lowercase__ = env.observation_space.shape[0]
        lowercase__ = env.action_space.shape[0]

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Any , a : int )-> Dict:
        """simple docstring"""
        return (x_in - self.means[key]) / self.stds[key]

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : List[str] )-> str:
        """simple docstring"""
        return x_in * self.stds[key] + self.means[key]

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Tuple )-> Tuple:
        """simple docstring"""
        if type(a ) is dict:
            return {k: self.to_torch(a ) for k, v in x_in.items()}
        elif torch.is_tensor(a ):
            return x_in.to(self.unet.device )
        return torch.tensor(a , device=self.unet.device )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Optional[int] , a : Dict , a : Optional[Any] )-> List[Any]:
        """simple docstring"""
        for key, val in cond.items():
            lowercase__ = val.clone()
        return x_in

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[Any] , a : Any , a : Optional[Any] , a : Optional[int] )-> List[Any]:
        """simple docstring"""
        lowercase__ = x.shape[0]
        lowercase__ = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            lowercase__ = torch.full((batch_size,) , a , device=self.unet.device , dtype=torch.long )
            for _ in range(a ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    lowercase__ = self.value_function(x.permute(0 , 2 , 1 ) , a ).sample
                    lowercase__ = torch.autograd.grad([y.sum()] , [x] )[0]
                    lowercase__ = self.scheduler._get_variance(a )
                    lowercase__ = torch.exp(0.5 * posterior_variance )
                    lowercase__ = model_std * grad
                lowercase__ = 0
                lowercase__ = x.detach()
                lowercase__ = x + scale * grad
                lowercase__ = self.reset_xa(a , a , self.action_dim )
            lowercase__ = self.unet(x.permute(0 , 2 , 1 ) , a ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            lowercase__ = self.scheduler.step(a , a , a , predict_epsilon=a )['prev_sample']
            # apply conditions to the trajectory (set the initial state)
            lowercase__ = self.reset_xa(a , a , self.action_dim )
            lowercase__ = self.to_torch(a )
        return x, y

    def __call__( self : Any , a : Tuple , a : int=64 , a : Tuple=32 , a : List[Any]=2 , a : List[str]=0.1 )-> List[Any]:
        """simple docstring"""
        lowercase__ = self.normalize(a , 'observations' )
        lowercase__ = obs[None].repeat(a , axis=0 )
        lowercase__ = {0: self.to_torch(a )}
        lowercase__ = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        lowercase__ = randn_tensor(a , device=self.unet.device )
        lowercase__ = self.reset_xa(a , a , self.action_dim )
        lowercase__ = self.to_torch(a )
        # run the diffusion process
        lowercase__ , lowercase__ = self.run_diffusion(a , a , a , a )
        # sort output trajectories by value
        lowercase__ = y.argsort(0 , descending=a ).squeeze()
        lowercase__ = x[sorted_idx]
        lowercase__ = sorted_values[:, :, : self.action_dim]
        lowercase__ = actions.detach().cpu().numpy()
        lowercase__ = self.de_normalize(a , key='actions' )
        # select the action with the highest value
        if y is not None:
            lowercase__ = 0
        else:
            # if we didn't run value guiding, select a random action
            lowercase__ = np.random.randint(0 , a )
        lowercase__ = denorm_actions[selected_index, 0]
        return denorm_actions
45
1
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse("""3.8"""):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> int:
    try:
        lowercase__ = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        lowercase__ = default
    else:
        # KEY is set, convert it to True or False.
        try:
            lowercase__ = strtobool(_SCREAMING_SNAKE_CASE )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""" )
    return _value


lowercase_ = parse_flag_from_env("""RUN_SLOW""", default=False)
lowercase_ = parse_flag_from_env("""RUN_REMOTE""", default=False)
lowercase_ = parse_flag_from_env("""RUN_LOCAL""", default=True)
lowercase_ = parse_flag_from_env("""RUN_PACKAGED""", default=True)

# Compression
lowercase_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
lowercase_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
lowercase_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")

# Audio
lowercase_ = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
    reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)

# Beam
lowercase_ = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
    reason="""test requires apache-beam and a compatible dill version""",
)

# Dill-cloudpickle compatibility
lowercase_ = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("""0.3.2"""),
    reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)

# Windows
lowercase_ = pytest.mark.skipif(
    sys.platform == """win32""",
    reason="""test should not be run on Windows""",
)


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
    try:
        import faiss  # noqa
    except ImportError:
        lowercase__ = unittest.skip('test requires faiss' )(_SCREAMING_SNAKE_CASE )
    return test_case


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
    try:
        import regex  # noqa
    except ImportError:
        lowercase__ = unittest.skip('test requires regex' )(_SCREAMING_SNAKE_CASE )
    return test_case


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
    try:
        import elasticsearch  # noqa
    except ImportError:
        lowercase__ = unittest.skip('test requires elasticsearch' )(_SCREAMING_SNAKE_CASE )
    return test_case


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict:
    try:
        import sqlalchemy  # noqa
    except ImportError:
        lowercase__ = unittest.skip('test requires sqlalchemy' )(_SCREAMING_SNAKE_CASE )
    return test_case


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[int]:
    if not config.TORCH_AVAILABLE:
        lowercase__ = unittest.skip('test requires PyTorch' )(_SCREAMING_SNAKE_CASE )
    return test_case


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict:
    if not config.TF_AVAILABLE:
        lowercase__ = unittest.skip('test requires TensorFlow' )(_SCREAMING_SNAKE_CASE )
    return test_case


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[int]:
    if not config.JAX_AVAILABLE:
        lowercase__ = unittest.skip('test requires JAX' )(_SCREAMING_SNAKE_CASE )
    return test_case


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
    if not config.PIL_AVAILABLE:
        lowercase__ = unittest.skip('test requires Pillow' )(_SCREAMING_SNAKE_CASE )
    return test_case


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers' )(_SCREAMING_SNAKE_CASE )
    else:
        return test_case


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken' )(_SCREAMING_SNAKE_CASE )
    else:
        return test_case


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Any:
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy' )(_SCREAMING_SNAKE_CASE )
    else:
        return test_case


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict:
    def _require_spacy_model(_SCREAMING_SNAKE_CASE ):
        try:
            import spacy  # noqa F401

            spacy.load(_SCREAMING_SNAKE_CASE )
        except ImportError:
            return unittest.skip('test requires spacy' )(_SCREAMING_SNAKE_CASE )
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(_SCREAMING_SNAKE_CASE ) )(_SCREAMING_SNAKE_CASE )
        else:
            return test_case

    return _require_spacy_model


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark' )(_SCREAMING_SNAKE_CASE )
    else:
        return test_case


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark' )(_SCREAMING_SNAKE_CASE )
    else:
        return test_case


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
    if not _run_slow_tests or _run_slow_tests == 0:
        lowercase__ = unittest.skip('test is slow' )(_SCREAMING_SNAKE_CASE )
    return test_case


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple:
    if not _run_local_tests or _run_local_tests == 0:
        lowercase__ = unittest.skip('test is local' )(_SCREAMING_SNAKE_CASE )
    return test_case


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
    if not _run_packaged_tests or _run_packaged_tests == 0:
        lowercase__ = unittest.skip('test is packaged' )(_SCREAMING_SNAKE_CASE )
    return test_case


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[int]:
    if not _run_remote_tests or _run_remote_tests == 0:
        lowercase__ = unittest.skip('test requires remote' )(_SCREAMING_SNAKE_CASE )
    return test_case


def __UpperCamelCase (*_SCREAMING_SNAKE_CASE ) -> Tuple:
    def decorate(cls ):
        for name, fn in cls.__dict__.items():
            if callable(_SCREAMING_SNAKE_CASE ) and name.startswith('test' ):
                for decorator in decorators:
                    lowercase__ = decorator(_SCREAMING_SNAKE_CASE )
                setattr(cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        return cls

    return decorate


class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    pass


class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    _UpperCamelCase : List[Any] = 0
    _UpperCamelCase : Any = 1
    _UpperCamelCase : Optional[Any] = 2


@contextmanager
def __UpperCamelCase (_SCREAMING_SNAKE_CASE=OfflineSimulationMode.CONNECTION_FAILS , _SCREAMING_SNAKE_CASE=1E-16 ) -> Tuple:
    lowercase__ = requests.Session().request

    def timeout_request(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
        # Change the url to an invalid url so that the connection hangs
        lowercase__ = 'https://10.255.255.1'
        if kwargs.get('timeout' ) is None:
            raise RequestWouldHangIndefinitelyError(
                F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
        lowercase__ = timeout
        try:
            return online_request(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            lowercase__ = url
            lowercase__ = e.args[0]
            lowercase__ = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
            lowercase__ = (max_retry_error,)
            raise

    def raise_connection_error(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
        raise requests.ConnectionError('Offline mode is enabled.' , request=_SCREAMING_SNAKE_CASE )

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send' , _SCREAMING_SNAKE_CASE ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request' , _SCREAMING_SNAKE_CASE ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE' , _SCREAMING_SNAKE_CASE ):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.' )


@contextmanager
def __UpperCamelCase (*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Dict:
    lowercase__ = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) as tmp_dir:
        try:
            os.chdir(_SCREAMING_SNAKE_CASE )
            yield
        finally:
            os.chdir(_SCREAMING_SNAKE_CASE )


@contextmanager
def __UpperCamelCase () -> str:
    import gc

    gc.collect()
    lowercase__ = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def __UpperCamelCase () -> Tuple:
    import gc

    gc.collect()
    lowercase__ = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
    return deepcopy(_SCREAMING_SNAKE_CASE ).integers(0 , 100 , 10 ).tolist() == deepcopy(_SCREAMING_SNAKE_CASE ).integers(0 , 100 , 10 ).tolist()


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
        try:
            return func(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
        except HTTPError as err:
            if str(_SCREAMING_SNAKE_CASE ).startswith('500' ) or str(_SCREAMING_SNAKE_CASE ).startswith('502' ):
                pytest.xfail(str(_SCREAMING_SNAKE_CASE ) )
            raise err

    return decorator.decorator(_wrapper , _SCREAMING_SNAKE_CASE )


class SCREAMING_SNAKE_CASE :
    def __init__( self : List[str] , a : List[Any] , a : Any , a : Optional[int] )-> List[str]:
        """simple docstring"""
        lowercase__ = returncode
        lowercase__ = stdout
        lowercase__ = stderr


async def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
    while True:
        lowercase__ = await stream.readline()
        if line:
            callback(_SCREAMING_SNAKE_CASE )
        else:
            break


async def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ) -> _RunOutput:
    if echo:
        print('\nRunning: ' , ' '.join(_SCREAMING_SNAKE_CASE ) )
    lowercase__ = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=_SCREAMING_SNAKE_CASE , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_SCREAMING_SNAKE_CASE , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    lowercase__ = []
    lowercase__ = []

    def tee(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" ):
        lowercase__ = line.decode('utf-8' ).rstrip()
        sink.append(_SCREAMING_SNAKE_CASE )
        if not quiet:
            print(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , file=_SCREAMING_SNAKE_CASE )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda _SCREAMING_SNAKE_CASE : tee(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , sys.stdout , label='stdout:' ) ),
            _read_stream(p.stderr , lambda _SCREAMING_SNAKE_CASE : tee(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , sys.stderr , label='stderr:' ) ),
        ] , timeout=_SCREAMING_SNAKE_CASE , )
    return _RunOutput(await p.wait() , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=180 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True ) -> _RunOutput:
    lowercase__ = asyncio.get_event_loop()
    lowercase__ = loop.run_until_complete(
        _stream_subprocess(_SCREAMING_SNAKE_CASE , env=_SCREAMING_SNAKE_CASE , stdin=_SCREAMING_SNAKE_CASE , timeout=_SCREAMING_SNAKE_CASE , quiet=_SCREAMING_SNAKE_CASE , echo=_SCREAMING_SNAKE_CASE ) )
    lowercase__ = ' '.join(_SCREAMING_SNAKE_CASE )
    if result.returncode > 0:
        lowercase__ = '\n'.join(result.stderr )
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}""" )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
    return result


def __UpperCamelCase () -> List[str]:
    lowercase__ = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
    lowercase__ = re.sub(R'^gw' , '' , _SCREAMING_SNAKE_CASE , 0 , re.M )
    return int(_SCREAMING_SNAKE_CASE )


def __UpperCamelCase () -> Optional[int]:
    lowercase__ = 29500
    lowercase__ = pytest_xdist_worker_id()
    return port + uniq_delta
45
from PIL import Image


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Image:
    def brightness(_SCREAMING_SNAKE_CASE ) -> float:
        return 128 + level + (c - 128)

    if not -2_5_5.0 <= level <= 2_5_5.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    # Load image
    with Image.open("""image_data/lena.jpg""") as img:
        # Change brightness to 100
        lowercase_ = change_brightness(img, 100)
        brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
45
1
import argparse
import os
import re

import tensorflow as tf
import torch

from transformers import BertConfig, BertModel
from transformers.utils import logging


logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
    lowercase__ = os.path.abspath(_SCREAMING_SNAKE_CASE )
    logger.info(F"""Converting TensorFlow checkpoint from {tf_path}""" )
    # Load weights from TF model
    lowercase__ = tf.train.list_variables(_SCREAMING_SNAKE_CASE )
    lowercase__ = []
    lowercase__ = []
    lowercase__ = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        lowercase__ = full_name.split('/' )
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(F"""Skipping non-model layer {full_name}""" )
            continue
        if "optimizer" in full_name:
            logger.info(F"""Skipping optimization layer {full_name}""" )
            continue
        if name[0] == "model":
            # ignore initial 'model'
            lowercase__ = name[1:]
        # figure out how many levels deep the name is
        lowercase__ = 0
        for _name in name:
            if _name.startswith('layer_with_weights' ):
                depth += 1
            else:
                break
        layer_depth.append(_SCREAMING_SNAKE_CASE )
        # read data
        lowercase__ = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        names.append('/'.join(_SCREAMING_SNAKE_CASE ) )
        arrays.append(_SCREAMING_SNAKE_CASE )
    logger.info(F"""Read a total of {len(_SCREAMING_SNAKE_CASE ):,} layers""" )
    # Sanity check
    if len(set(_SCREAMING_SNAKE_CASE ) ) != 1:
        raise ValueError(F"""Found layer names with different depths (layer depth {list(set(_SCREAMING_SNAKE_CASE ) )})""" )
    lowercase__ = list(set(_SCREAMING_SNAKE_CASE ) )[0]
    if layer_depth != 1:
        raise ValueError(
            'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'
            ' heads.' )
    # convert layers
    logger.info('Converting weights...' )
    for full_name, array in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        lowercase__ = full_name.split('/' )
        lowercase__ = model
        lowercase__ = []
        for i, m_name in enumerate(_SCREAMING_SNAKE_CASE ):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith('layer_with_weights' ):
                lowercase__ = int(m_name.split('-' )[-1] )
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(['embeddings', 'LayerNorm'] )
                    lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'embeddings' )
                    lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'LayerNorm' )
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(['encoder', 'layer', str(layer_num - 4 )] )
                    lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'encoder' )
                    lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'layer' )
                    lowercase__ = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(['pooler', 'dense'] )
                    lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'pooler' )
                    lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'dense' )
            elif m_name == "embeddings":
                trace.append('embeddings' )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'embeddings' )
                if layer_num == 0:
                    trace.append('word_embeddings' )
                    lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'word_embeddings' )
                elif layer_num == 1:
                    trace.append('position_embeddings' )
                    lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'position_embeddings' )
                elif layer_num == 2:
                    trace.append('token_type_embeddings' )
                    lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'token_type_embeddings' )
                else:
                    raise ValueError(F"""Unknown embedding layer with name {full_name}""" )
                trace.append('weight' )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'weight' )
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(['attention', 'self'] )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'attention' )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'self' )
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(['attention', 'output', 'LayerNorm'] )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'attention' )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'output' )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'LayerNorm' )
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(['attention', 'output', 'dense'] )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'attention' )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'output' )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'dense' )
            elif m_name == "_output_dense":
                # output dense
                trace.extend(['output', 'dense'] )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'output' )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'dense' )
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(['output', 'LayerNorm'] )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'output' )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'LayerNorm' )
            elif m_name == "_key_dense":
                # attention key
                trace.append('key' )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'key' )
            elif m_name == "_query_dense":
                # attention query
                trace.append('query' )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'query' )
            elif m_name == "_value_dense":
                # attention value
                trace.append('value' )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'value' )
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(['intermediate', 'dense'] )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'intermediate' )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'dense' )
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.append('output' )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'output' )
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append('bias' )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'bias' )
            elif m_name in ["kernel", "gamma"]:
                trace.append('weight' )
                lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'weight' )
            else:
                logger.warning(F"""Ignored {m_name}""" )
        # for certain layers reshape is necessary
        lowercase__ = '.'.join(_SCREAMING_SNAKE_CASE )
        if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)' , _SCREAMING_SNAKE_CASE ) or re.match(
            R'(\S+)\.attention\.output\.dense\.weight' , _SCREAMING_SNAKE_CASE ):
            lowercase__ = array.reshape(pointer.data.shape )
        if "kernel" in full_name:
            lowercase__ = array.transpose()
        if pointer.shape == array.shape:
            lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
        else:
            raise ValueError(
                F"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
                F""" {array.shape}""" )
        logger.info(F"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
    return model


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
    # Instantiate model
    logger.info(F"""Loading model based on config from {config_path}...""" )
    lowercase__ = BertConfig.from_json_file(_SCREAMING_SNAKE_CASE )
    lowercase__ = BertModel(_SCREAMING_SNAKE_CASE )
    # Load weights from checkpoint
    logger.info(F"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
    load_tfa_weights_in_bert(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    # Save pytorch-model
    logger.info(F"""Saving PyTorch model to {pytorch_dump_path}...""" )
    torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()
    parser.add_argument(
        """--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
    )
    parser.add_argument(
        """--bert_config_file""",
        type=str,
        required=True,
        help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""",
        type=str,
        required=True,
        help="""Path to the output PyTorch model (must include filename).""",
    )
    lowercase_ = parser.parse_args()
    convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
45
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class SCREAMING_SNAKE_CASE (unittest.TestCase ): def __init__( self : Any , a : str , a : List[Any]=7 , a : int=3 , a : int=18 , a : Optional[Any]=30 , a : Optional[int]=400 , a : int=True , a : Tuple=None , a : Optional[Any]=True , a : str=False , a : str=True , a : int=True , a : Tuple=[0.5, 0.5, 0.5] , a : Any=[0.5, 0.5, 0.5] , )-> Optional[int]: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = do_resize lowercase__ = size if size is not None else {'height': 18, 'width': 20} lowercase__ = do_thumbnail lowercase__ = do_align_axis lowercase__ = do_pad lowercase__ = do_normalize lowercase__ = image_mean lowercase__ = image_std def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : Optional[Any] = DonutImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]: """simple docstring""" lowercase__ = DonutImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self : Any )-> int: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a , 'do_resize' ) ) self.assertTrue(hasattr(a , 'size' ) ) self.assertTrue(hasattr(a , 'do_thumbnail' ) ) self.assertTrue(hasattr(a , 'do_align_long_axis' ) ) self.assertTrue(hasattr(a , 'do_pad' ) ) self.assertTrue(hasattr(a , 'do_normalize' ) ) self.assertTrue(hasattr(a , 'image_mean' ) ) self.assertTrue(hasattr(a , 'image_std' ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict: """simple docstring""" lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict: """simple docstring""" pass @is_flaky() def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a ) for image in image_inputs: self.assertIsInstance(a , 
Image.Image ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a ) for image in image_inputs: self.assertIsInstance(a , np.ndarray ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a ) for image in image_inputs: self.assertIsInstance(a , torch.Tensor ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
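# Minimal standalone usage sketch of the processor under test (the size kwarg
# and expected output shape mirror the assertions above; the input image and
# its dimensions are hypothetical):
from PIL import Image
from transformers import DonutImageProcessor

processor = DonutImageProcessor(size={"height": 18, "width": 20})
image = Image.new("RGB", (32, 24))
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 20])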
45
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase_ = { """configuration_x_clip""": [ """XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XCLIPConfig""", """XCLIPTextConfig""", """XCLIPVisionConfig""", ], """processing_x_clip""": ["""XCLIPProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """XCLIPModel""", """XCLIPPreTrainedModel""", """XCLIPTextModel""", """XCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
45
import math


def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    xa, ya = map(int, input(prompt).split(","))
    xb, yb = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)

    # We check for the largest number
    if resa > resb:
        print("Largest number is", xa, "^", ya)
    elif resb > resa:
        print("Largest number is", xb, "^", yb)
    else:
        print("Both are equal")
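# Non-interactive spot checks for the helper above (a sketch; the values are
# chosen for illustration): 2^10 = 1024 and 3^7 = 2187, so 3^7 compares larger.
assert res(2, 10) < res(3, 7)  # 10*log10(2) ~ 3.01 < 7*log10(3) ~ 3.34
assert res(5, 0) == 1          # anything raised to 0 is 1
assert res(0, 7) == 0          # 0 raised to anything is 0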
45
1
import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: lowercase__ = tmp_path / 'cache' lowercase__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase__ = JsonDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read() _check_json_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: lowercase__ = tmp_path / 'cache' lowercase__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} lowercase__ = features.copy() if features else default_expected_features lowercase__ = ( Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase__ = JsonDatasetReader(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read() _check_json_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( 'features' , [ None, {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}, ] , ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: lowercase__ = tmp_path / 'cache' lowercase__ = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'} lowercase__ = features.copy() if features else default_expected_features lowercase__ = ( Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase__ = JsonDatasetReader(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read() assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} lowercase__ = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'} lowercase__ = features.copy() lowercase__ = ( Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase__ = tmp_path / 
'cache' lowercase__ = JsonDatasetReader(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read() assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: lowercase__ = tmp_path / 'cache' lowercase__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} lowercase__ = JsonDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE ).read() _check_json_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('path_type' , [str, list] ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowercase__ = jsonl_path elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowercase__ = [jsonl_path] lowercase__ = tmp_path / 'cache' lowercase__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} lowercase__ = JsonDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read() _check_json_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=("train",) ) -> int: assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for split in splits: lowercase__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: lowercase__ = tmp_path / 'cache' lowercase__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase__ = JsonDatasetReader({'train': jsonl_path} , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read() _check_json_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any: lowercase__ = tmp_path / 'cache' lowercase__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} lowercase__ = features.copy() if features else default_expected_features lowercase__ = ( Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase__ = JsonDatasetReader({'train': jsonl_path} , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read() _check_json_datasetdict(_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: if split: lowercase__ = {split: jsonl_path} else: lowercase__ = 'train' lowercase__ = {'train': jsonl_path, 'test': jsonl_path} lowercase__ = tmp_path / 'cache' lowercase__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} lowercase__ = JsonDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read() _check_json_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str: return json.load(_SCREAMING_SNAKE_CASE ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]: return [json.loads(_SCREAMING_SNAKE_CASE ) for line in buffer] class SCREAMING_SNAKE_CASE : @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] ) def SCREAMING_SNAKE_CASE_ ( self : Any , a : int , a : Optional[int] , a : Tuple )-> Optional[int]: """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(a , a , lines=a ).write() buffer.seek(0 ) lowercase__ = load_json_function(a ) assert isinstance(a , a ) assert isinstance(exported_content[0] , a ) assert len(a ) == 10 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, {'schema', 'data'}, 'data'), ] , ) def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[int] , a : str , a : Dict , a : Any , a : Dict )-> List[str]: """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(a , a , lines=a , orient=a ).write() buffer.seek(0 ) lowercase__ = load_json(a ) assert isinstance(a , a ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(a , 'keys' ) and not hasattr(exported_content[0] , 'keys' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(a ) == 10 @pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : List[Any] , a : List[Any] , a : str )-> Any: """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(a , a , lines=a , num_proc=2 ).write() buffer.seek(0 ) lowercase__ = load_json_function(a ) assert isinstance(a , a ) assert isinstance(exported_content[0] , a ) assert len(a ) == 10 @pytest.mark.parametrize( 'orient, container, keys, len_at' , [ ('records', list, {'tokens', 'labels', 'answers', 'id'}, None), ('split', dict, {'columns', 'data'}, 'data'), ('index', dict, set('0123456789' ), None), ('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'), ('values', list, None, None), ('table', dict, {'schema', 'data'}, 'data'), ] , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Tuple , a : Dict , a : Any , a : str , a : str )-> List[str]: """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(a , a , lines=a , orient=a , num_proc=2 ).write() buffer.seek(0 ) lowercase__ = load_json(a ) assert isinstance(a , a ) if keys: if container is dict: assert exported_content.keys() == keys else: assert 
exported_content[0].keys() == keys else: assert not hasattr(a , 'keys' ) and not hasattr(exported_content[0] , 'keys' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(a ) == 10 def SCREAMING_SNAKE_CASE_ ( self : int , a : int )-> List[str]: """simple docstring""" with pytest.raises(a ): with io.BytesIO() as buffer: JsonDatasetWriter(a , a , num_proc=0 ) @pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] ) def SCREAMING_SNAKE_CASE_ ( self : str , a : List[str] , a : int , a : List[str] , a : Dict , a : Union[str, Any] )-> Dict: """simple docstring""" lowercase__ = tmp_path_factory.mktemp('data' ) / f"""test.json.{extension}""" lowercase__ = str(shared_datadir / f"""test_file.json.{extension}""" ) JsonDatasetWriter(a , a , compression=a ).write() with fsspec.open(a , 'rb' , compression='infer' ) as f: lowercase__ = f.read() with fsspec.open(a , 'rb' , compression='infer' ) as f: lowercase__ = f.read() assert exported_content == original_content
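# For orientation, the round trip these tests exercise reduces to the sketch
# below (standalone; toy data and an in-memory buffer, JSON Lines output):
import io

from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
buffer = io.BytesIO()
ds.to_json(buffer, lines=True)
buffer.seek(0)
print(buffer.read().decode())  # one JSON object per line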
45
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        # A cell may be visited if it lies inside the grid, holds land and has
        # not been visited yet.
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Check all 8 cells surrounding (i, j)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
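# Quick demonstration of the island counter above (hypothetical 5x5 grid):
grid = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
g = Graph(5, 5, grid)
print(g.count_islands())  # 5 islands under 8-directional connectivity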
45
1
import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy lowercase_ = logging.getLogger(__name__) lowercase_ = """pytorch_model.bin""" @dataclasses.dataclass class SCREAMING_SNAKE_CASE : _UpperCamelCase : str = dataclasses.field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} ) _UpperCamelCase : Optional[str] = dataclasses.field( default=UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , ) @dataclasses.dataclass class SCREAMING_SNAKE_CASE : _UpperCamelCase : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} ) _UpperCamelCase : str = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} ) _UpperCamelCase : Optional[str] = dataclasses.field( default=UpperCAmelCase , metadata={'help': 'A csv or a json file containing the validation data.'} ) _UpperCamelCase : Optional[str] = dataclasses.field( default=UpperCAmelCase , metadata={'help': 'The name of the task to train on.'} , ) _UpperCamelCase : Optional[List[str]] = dataclasses.field( default=UpperCAmelCase , metadata={'help': 'The list of labels for the task.'} ) @dataclasses.dataclass class SCREAMING_SNAKE_CASE : _UpperCamelCase : str = dataclasses.field( metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} ) _UpperCamelCase : Optional[str] = dataclasses.field( default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} ) _UpperCamelCase : Optional[str] = dataclasses.field( default='no' , metadata={ 'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]' } , ) _UpperCamelCase : Optional[int] = dataclasses.field( default=10 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , ) _UpperCamelCase : Optional[float] = dataclasses.field( default=0.0 , metadata={ 'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.' 
} , ) _UpperCamelCase : Optional[bool] = dataclasses.field( default=UpperCAmelCase , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , ) _UpperCamelCase : Optional[bool] = dataclasses.field( default=UpperCAmelCase , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , ) _UpperCamelCase : Optional[bool] = dataclasses.field( default=UpperCAmelCase , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , ) _UpperCamelCase : Optional[float] = dataclasses.field( default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , ) _UpperCamelCase : Optional[int] = dataclasses.field( default=1_00 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , ) _UpperCamelCase : Optional[int] = dataclasses.field( default=UpperCAmelCase , metadata={'help': 'Random seed for initialization.'} , ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: lowercase__ = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: lowercase__ = dataset.filter(lambda _SCREAMING_SNAKE_CASE : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 lowercase__ = int(eval_result * len(_SCREAMING_SNAKE_CASE ) ) print(_SCREAMING_SNAKE_CASE ) lowercase__ = dataset.sort('probability' , reverse=_SCREAMING_SNAKE_CASE ) lowercase__ = dataset.select(range(_SCREAMING_SNAKE_CASE ) ) lowercase__ = dataset.remove_columns(['label', 'probability'] ) lowercase__ = dataset.rename_column('prediction' , 'label' ) lowercase__ = dataset.map(lambda _SCREAMING_SNAKE_CASE : {"label": idalabel[example["label"]]} ) lowercase__ = dataset.shuffle(seed=args.seed ) lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , F"""train_pseudo.{args.data_file_extension}""" ) if args.data_file_extension == "csv": dataset.to_csv(_SCREAMING_SNAKE_CASE , index=_SCREAMING_SNAKE_CASE ) else: dataset.to_json(_SCREAMING_SNAKE_CASE ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]: lowercase__ = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() lowercase__ = STModelArguments(model_name_or_path=_SCREAMING_SNAKE_CASE ) lowercase__ = STDataArguments(train_file=_SCREAMING_SNAKE_CASE , infer_file=_SCREAMING_SNAKE_CASE ) lowercase__ = STTrainingArguments(output_dir=_SCREAMING_SNAKE_CASE ) lowercase__ = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(_SCREAMING_SNAKE_CASE ).items(): setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for key, value in kwargs.items(): if hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): setattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Sanity checks lowercase__ = {} lowercase__ = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None lowercase__ = args.train_file lowercase__ = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None lowercase__ = args.eval_file for key in data_files: lowercase__ = data_files[key].split('.' )[-1] assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file.""" if args.data_file_extension is None: lowercase__ = extension else: assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`.""" assert ( args.eval_metric in datasets.list_metrics() ), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.""" # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info('Creating the initial data directory for self-training...' 
) lowercase__ = F"""{args.output_dir}/self-train_iter-{{}}""".format lowercase__ = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=_SCREAMING_SNAKE_CASE ) os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) accelerator.wait_for_everyone() lowercase__ = None lowercase__ = None lowercase__ = 0 lowercase__ = False # Show the progress bar lowercase__ = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): lowercase__ = data_dir_format(_SCREAMING_SNAKE_CASE ) assert os.path.exists(_SCREAMING_SNAKE_CASE ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , 'stage-1' ) lowercase__ = { 'accelerator': accelerator, 'model_name_or_path': args.model_name_or_path, 'cache_dir': args.cache_dir, 'do_train': True, 'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'], 'do_eval': True if args.eval_file is not None else False, 'eval_file': data_files['eval'], 'do_predict': True, 'infer_file': data_files['infer'], 'task_name': args.task_name, 'label_list': args.label_list, 'output_dir': current_output_dir, 'eval_metric': args.eval_metric, 'evaluation_strategy': args.evaluation_strategy, 'early_stopping_patience': args.early_stopping_patience, 'early_stopping_threshold': args.early_stopping_threshold, 'seed': args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): arguments_dict.update({key: value} ) lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , 'best-checkpoint' , _SCREAMING_SNAKE_CASE ) if os.path.exists(_SCREAMING_SNAKE_CASE ): logger.info( 'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) else: logger.info('***** Running self-training: iteration: %d, stage: 1 *****' , _SCREAMING_SNAKE_CASE ) finetune(**_SCREAMING_SNAKE_CASE ) accelerator.wait_for_everyone() assert os.path.exists(_SCREAMING_SNAKE_CASE ) logger.info('Self-training job completed: iteration: %d, stage: 1.' , _SCREAMING_SNAKE_CASE ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , 'best-checkpoint' ) lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , 'stage-2' ) # Update arguments_dict lowercase__ = model_path lowercase__ = data_files['train'] lowercase__ = current_output_dir lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , 'best-checkpoint' , _SCREAMING_SNAKE_CASE ) if os.path.exists(_SCREAMING_SNAKE_CASE ): logger.info( 'Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) else: logger.info('***** Running self-training: iteration: %d, stage: 2 *****' , _SCREAMING_SNAKE_CASE ) finetune(**_SCREAMING_SNAKE_CASE ) accelerator.wait_for_everyone() assert os.path.exists(_SCREAMING_SNAKE_CASE ) logger.info('Self-training job completed: iteration: %d, stage: 2.' 
, _SCREAMING_SNAKE_CASE ) lowercase__ = iteration lowercase__ = data_dir_format(iteration + 1 ) lowercase__ = AutoConfig.from_pretrained(os.path.join(_SCREAMING_SNAKE_CASE , 'best-checkpoint' ) ) lowercase__ = config.idalabel lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , 'eval_results_best-checkpoint.json' ) lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , 'test_results_best-checkpoint.json' ) assert os.path.exists(_SCREAMING_SNAKE_CASE ) with open(_SCREAMING_SNAKE_CASE , 'r' ) as f: lowercase__ = float(json.load(_SCREAMING_SNAKE_CASE )[args.eval_metric] ) lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , 'infer_output_best-checkpoint.csv' ) assert os.path.exists(_SCREAMING_SNAKE_CASE ) # Loading the dataset from local csv or json files. lowercase__ = load_dataset(args.data_file_extension , data_files={'data': data_files['infer']} )['data'] lowercase__ = load_dataset('csv' , data_files={'data': infer_output_file} )['data'] if accelerator.is_main_process: os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) shutil.copy(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , F"""eval_results_iter-{iteration}.json""" ) ) if os.path.exists(_SCREAMING_SNAKE_CASE ): shutil.copy(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , F"""test_results_iter-{iteration}.json""" ) ) create_pseudo_labeled_data(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) accelerator.wait_for_everyone() lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , F"""train_pseudo.{args.data_file_extension}""" ) if args.evaluation_strategy != IntervalStrategy.NO.value: lowercase__ = eval_result if best_iteration is None: lowercase__ = new_iteration lowercase__ = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: lowercase__ = new_iteration lowercase__ = new_eval_result lowercase__ = 0 else: if new_eval_result == best_eval_result: lowercase__ = new_iteration lowercase__ = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: lowercase__ = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info('Best iteration: %d' , _SCREAMING_SNAKE_CASE ) logger.info('Best evaluation result: %s = %f' , args.eval_metric , _SCREAMING_SNAKE_CASE ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(_SCREAMING_SNAKE_CASE , F"""eval_results_iter-{iteration}.json""" ) , os.path.join(_SCREAMING_SNAKE_CASE , 'eval_results_best-iteration.json' ) , ) else: # Assume that the last iteration is the best logger.info('Best iteration: %d' , args.max_selftrain_iterations - 1 ) logger.info('Best evaluation result: %s = %f' , args.eval_metric , _SCREAMING_SNAKE_CASE ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(_SCREAMING_SNAKE_CASE , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(_SCREAMING_SNAKE_CASE , 'eval_results_best-iteration.json' ) , )
45
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])
    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for base in range(2, 37):
        for num in range(1_000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
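# Spot checks for decimal_to_any (standalone; values picked for illustration):
assert decimal_to_any(0, 2) == "0"
assert decimal_to_any(5, 4) == "11"     # 1*4 + 1 = 5
assert decimal_to_any(20, 3) == "202"   # 2*9 + 0*3 + 2 = 20
assert decimal_to_any(58, 16) == "3A"   # hex, digits above 9 map to letters
assert decimal_to_any(243, 17) == "E5"  # 14*17 + 5 = 243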
45
1
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="""%(message)s""") def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> np.ndarray: return input_array.reshape((input_array.size, 1) ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> np.ndarray: lowercase__ = np.nan for i in range(_SCREAMING_SNAKE_CASE ): lowercase__ = features[:, labels == i] lowercase__ = data.mean(1 ) # Centralize the data of class i lowercase__ = data - column_reshape(_SCREAMING_SNAKE_CASE ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(_SCREAMING_SNAKE_CASE , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) lowercase__ = np.dot(_SCREAMING_SNAKE_CASE , centered_data.T ) return covariance_sum / features.shape[1] def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> np.ndarray: lowercase__ = features.mean(1 ) lowercase__ = np.nan for i in range(_SCREAMING_SNAKE_CASE ): lowercase__ = features[:, labels == i] lowercase__ = data.shape[1] lowercase__ = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(_SCREAMING_SNAKE_CASE ) - column_reshape(_SCREAMING_SNAKE_CASE ) , (column_reshape(_SCREAMING_SNAKE_CASE ) - column_reshape(_SCREAMING_SNAKE_CASE )).T , ) else: # If covariance_sum is np.nan (i.e. first loop) lowercase__ = device_data * np.dot( column_reshape(_SCREAMING_SNAKE_CASE ) - column_reshape(_SCREAMING_SNAKE_CASE ) , (column_reshape(_SCREAMING_SNAKE_CASE ) - column_reshape(_SCREAMING_SNAKE_CASE )).T , ) return covariance_sum / features.shape[1] def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> np.ndarray: # Check if the features have been loaded if features.any(): lowercase__ = features.mean(1 ) # Center the dataset lowercase__ = features - np.reshape(_SCREAMING_SNAKE_CASE , (data_mean.size, 1) ) lowercase__ = np.dot(_SCREAMING_SNAKE_CASE , centered_data.T ) / features.shape[1] lowercase__ , lowercase__ = np.linalg.eigh(_SCREAMING_SNAKE_CASE ) # Take all the columns in the reverse order (-1), and then takes only the first lowercase__ = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space lowercase__ = np.dot(filtered_eigenvectors.T , _SCREAMING_SNAKE_CASE ) logging.info('Principal Component Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=_SCREAMING_SNAKE_CASE ) logging.error('Dataset empty' ) raise AssertionError def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> np.ndarray: assert classes > dimensions # Check if features have been already loaded if features.any: lowercase__ , lowercase__ = eigh( covariance_between_classes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , covariance_within_classes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , ) lowercase__ = eigenvectors[:, ::-1][:, :dimensions] lowercase__ , lowercase__ , lowercase__ = np.linalg.svd(_SCREAMING_SNAKE_CASE ) lowercase__ = svd_matrix[:, 0:dimensions] lowercase__ = np.dot(filtered_svd_matrix.T , _SCREAMING_SNAKE_CASE ) logging.info('Linear Discriminant Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=_SCREAMING_SNAKE_CASE ) logging.error('Dataset empty' ) raise AssertionError def 
__UpperCamelCase () -> None: # Create dummy dataset with 2 classes and 3 features lowercase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) lowercase__ = np.array([0, 0, 0, 1, 1] ) lowercase__ = 2 lowercase__ = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(_SCREAMING_SNAKE_CASE ) as error_info: lowercase__ = linear_discriminant_analysis( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ): raise AssertionError( 'Did not raise AssertionError for dimensions > classes' ) assert error_info.type is AssertionError def __UpperCamelCase () -> None: lowercase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) lowercase__ = 2 lowercase__ = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] ) with pytest.raises(_SCREAMING_SNAKE_CASE ) as error_info: lowercase__ = principal_component_analysis(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
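# An independent, self-contained numpy sketch of the PCA projection step
# implemented above (toy data: 3 features x 5 samples, reduced to 2 dims):
import numpy as np

features = np.array([[1.0, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
dimensions = 2

centered = features - features.mean(axis=1, keepdims=True)
covariance = centered @ centered.T / features.shape[1]
_, eigenvectors = np.linalg.eigh(covariance)
# eigh returns eigenvalues in ascending order, so reverse the columns and
# keep the leading `dimensions` eigenvectors before projecting.
projection = eigenvectors[:, ::-1][:, :dimensions]
projected = projection.T @ centered
print(projected.shape)  # (2, 5)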
45
import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE : def __init__( self : List[Any] , a : Any , a : Optional[int]=13 , a : Tuple=30 , a : Union[str, Any]=2 , a : List[str]=3 , a : Dict=True , a : List[str]=True , a : List[Any]=32 , a : List[str]=5 , a : Optional[int]=4 , a : List[str]=37 , a : Dict="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : int=10 , a : List[str]=0.02 , a : int=None , a : List[str]=2 , )-> Dict: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = scope lowercase__ = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase__ = (image_size // patch_size) ** 2 lowercase__ = num_patches + 1 def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any: """simple docstring""" lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]: """simple docstring""" return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[Any] , a : List[str] , a : Dict )-> Optional[Any]: """simple docstring""" lowercase__ = ViTModel(config=a ) model.to(a ) model.eval() lowercase__ = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[Any] , a : int , a : Tuple )-> Union[str, Any]: """simple docstring""" lowercase__ = ViTForMaskedImageModeling(config=a ) model.to(a ) model.eval() lowercase__ = model(a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale 
images lowercase__ = 1 lowercase__ = ViTForMaskedImageModeling(a ) model.to(a ) model.eval() lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ = model(a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[str] , a : int , a : List[Any] )-> str: """simple docstring""" lowercase__ = self.type_sequence_label_size lowercase__ = ViTForImageClassification(a ) model.to(a ) model.eval() lowercase__ = model(a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase__ = 1 lowercase__ = ViTForImageClassification(a ) model.to(a ) model.eval() lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) = config_and_inputs lowercase__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : Any = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) _UpperCamelCase : Union[str, Any] = ( {'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification} if is_torch_available() else {} ) _UpperCamelCase : int = True _UpperCamelCase : int = False _UpperCamelCase : Union[str, Any] = False _UpperCamelCase : Dict = False def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]: """simple docstring""" lowercase__ = ViTModelTester(self ) lowercase__ = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(a ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_image_modeling(*a ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a ) @slow def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]: """simple docstring""" for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = ViTModel.from_pretrained(a ) self.assertIsNotNone(a ) def __UpperCamelCase () -> str: lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE (unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]: """simple docstring""" return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]: """simple docstring""" lowercase__ = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(a ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ = model(**a ) # verify the logits lowercase__ = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , a ) lowercase__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) ) @slow def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]: """simple docstring""" lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' ).to(a ) lowercase__ = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 ) lowercase__ = prepare_img() lowercase__ = image_processor(images=a , return_tensors='pt' ) lowercase__ = inputs.pixel_values.to(a ) # forward pass with torch.no_grad(): lowercase__ = model(a , interpolate_pos_encoding=a ) # verify the logits lowercase__ = torch.Size((1, 3_601, 384) ) self.assertEqual(outputs.last_hidden_state.shape , a ) lowercase__ = torch.tensor( [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(a ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self : str )-> str: """simple docstring""" lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=a , return_tensors='pt' ) lowercase__ = inputs.pixel_values.to(a ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowercase__ = model(a )
45
1
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Optional[Any] = '' _UpperCamelCase : str = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _UpperCamelCase : str = None # compression type in fsspec. ex: "gzip" _UpperCamelCase : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : List[str] , a : str = "" , a : Optional[str] = None , a : Optional[dict] = None , **a : List[Any] )-> str: """simple docstring""" super().__init__(self , **a ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode lowercase__ = fsspec.open( a , mode='rb' , protocol=a , compression=self.compression , client_kwargs={ 'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459 'trust_env': True, # Enable reading proxy env variables. **(target_options or {}).pop('client_kwargs' , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) lowercase__ = os.path.basename(self.file.path.split('::' )[0] ) lowercase__ = ( self.compressed_name[: self.compressed_name.rindex('.' )] if '.' in self.compressed_name else self.compressed_name ) lowercase__ = None @classmethod def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] , a : Any )-> Optional[int]: """simple docstring""" return super()._strip_protocol(a ).lstrip('/' ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> List[str]: """simple docstring""" if self.dir_cache is None: lowercase__ = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name} lowercase__ = {f['name']: f} def SCREAMING_SNAKE_CASE_ ( self : Any , a : str )-> Optional[int]: """simple docstring""" return self.file.open().read() def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : str = "rb" , a : Optional[Any]=None , a : Union[str, Any]=True , a : Any=None , **a : str , )-> Union[str, Any]: """simple docstring""" lowercase__ = self._strip_protocol(a ) if mode != "rb": raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" ) return self.file.open() class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : List[Any] = 'bz2' _UpperCamelCase : Union[str, Any] = 'bz2' _UpperCamelCase : Union[str, Any] = '.bz2' class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Tuple = 'gzip' _UpperCamelCase : Dict = 'gzip' _UpperCamelCase : int = '.gz' class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Tuple = 'lz4' _UpperCamelCase : Optional[int] = 'lz4' _UpperCamelCase : List[str] = '.lz4' class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Optional[int] = 'xz' _UpperCamelCase : List[Any] = 'xz' _UpperCamelCase : Tuple = '.xz' class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Tuple = 'zstd' _UpperCamelCase : List[str] = 'zstd' _UpperCamelCase : Union[str, Any] = '.zst' def __init__( self : Dict , a : str , a : str = "rb" , a : Optional[str] = None , a : Optional[dict] = None , a : int = DEFAULT_BLOCK_SIZE , **a : List[Any] , )-> List[Any]: """simple docstring""" super().__init__( fo=a , mode=a , target_protocol=a , target_options=a , block_size=a , **a , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File 
"/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 lowercase__ = self.file.__enter__ class SCREAMING_SNAKE_CASE : def __init__( self : List[str] , a : Union[str, Any] )-> str: """simple docstring""" lowercase__ = file_ def __enter__( self : int )-> Union[str, Any]: """simple docstring""" self._file.__enter__() return self def __exit__( self : Dict , *a : Optional[Any] , **a : Union[str, Any] )-> Optional[int]: """simple docstring""" self._file.__exit__(*a , **a ) def __iter__( self : Any )-> Union[str, Any]: """simple docstring""" return iter(self._file ) def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]: """simple docstring""" return next(self._file ) def __getattr__( self : Optional[Any] , a : int )-> Optional[Any]: """simple docstring""" return getattr(self._file , a ) def fixed_enter(*a : List[Any] , **a : List[Any] ): return WrappedFile(_enter(*a , **a ) ) lowercase__ = fixed_enter
45
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
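# Non-interactive spot check of the sort above (hypothetical input list):
data = [18, 5, 100, 3, -7, 42]
assert stooge_sort(data) == sorted([18, 5, 100, 3, -7, 42])
print(data)  # [-7, 3, 5, 18, 42, 100]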
45
1
from __future__ import annotations from typing import Any class SCREAMING_SNAKE_CASE : def __init__( self : List[str] , a : int , a : int , a : float = 0 )-> None: """simple docstring""" lowercase__ , lowercase__ = row, column lowercase__ = [[default_value for c in range(a )] for r in range(a )] def __str__( self : int )-> str: """simple docstring""" lowercase__ = f"""Matrix consist of {self.row} rows and {self.column} columns\n""" # Make string identifier lowercase__ = 0 for row_vector in self.array: for obj in row_vector: lowercase__ = max(a , len(str(a ) ) ) lowercase__ = f"""%{max_element_length}s""" # Make string and return def single_line(a : list[float] ) -> str: nonlocal string_format_identifier lowercase__ = '[' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(a ) for row_vector in self.array ) return s def __repr__( self : List[str] )-> str: """simple docstring""" return str(self ) def SCREAMING_SNAKE_CASE_ ( self : Dict , a : tuple[int, int] )-> bool: """simple docstring""" if not (isinstance(a , (list, tuple) ) and len(a ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self : List[Any] , a : tuple[int, int] )-> Any: """simple docstring""" assert self.validate_indicies(a ) return self.array[loc[0]][loc[1]] def __setitem__( self : int , a : tuple[int, int] , a : float )-> None: """simple docstring""" assert self.validate_indicies(a ) lowercase__ = value def __add__( self : List[Any] , a : Matrix )-> Matrix: """simple docstring""" assert isinstance(a , a ) assert self.row == another.row and self.column == another.column # Add lowercase__ = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): lowercase__ = self[r, c] + another[r, c] return result def __neg__( self : str )-> Matrix: """simple docstring""" lowercase__ = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): lowercase__ = -self[r, c] return result def __sub__( self : int , a : Matrix )-> Matrix: """simple docstring""" return self + (-another) def __mul__( self : Any , a : int | float | Matrix )-> Matrix: """simple docstring""" if isinstance(a , (int, float) ): # Scalar multiplication lowercase__ = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): lowercase__ = self[r, c] * another return result elif isinstance(a , a ): # Matrix multiplication assert self.column == another.row lowercase__ = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: lowercase__ = f"""Unsupported type given for another ({type(a )})""" raise TypeError(a ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Matrix: """simple docstring""" lowercase__ = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): lowercase__ = self[r, c] return result def SCREAMING_SNAKE_CASE_ ( self : int , a : Matrix , a : Matrix )-> Any: """simple docstring""" assert isinstance(a , a ) and isinstance(a , a ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate lowercase__ = v.transpose() lowercase__ = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * 
(1.0 / numerator_factor)) # Testing if __name__ == "__main__": def __UpperCamelCase () -> None: # a^(-1) lowercase__ = Matrix(3 , 3 , 0 ) for i in range(3 ): lowercase__ = 1 print(F"""a^(-1) is {ainv}""" ) # u, v lowercase__ = Matrix(3 , 1 , 0 ) lowercase__ , lowercase__ , lowercase__ = 1, 2, -3 lowercase__ = Matrix(3 , 1 , 0 ) lowercase__ , lowercase__ , lowercase__ = 4, -2, 5 print(F"""u is {u}""" ) print(F"""v is {v}""" ) print(F"""uv^T is {u * v.transpose()}""" ) # Sherman Morrison print(F"""(a + uv^T)^(-1) is {ainv.sherman_morrison(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}""" ) def __UpperCamelCase () -> None: import doctest doctest.testmod() testa()
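# Cross-check of the Sherman-Morrison update, written with plain numpy rather
# than the Matrix class above (A = I and illustrative u, v; any A with
# 1 + v^T A^(-1) u != 0 would do):
import numpy as np

a_inv = np.eye(3)
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])

# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u)
denominator = 1.0 + (v.T @ a_inv @ u)[0, 0]
sherman_morrison = a_inv - (a_inv @ u) @ (v.T @ a_inv) / denominator

assert np.allclose(sherman_morrison, np.linalg.inv(np.eye(3) + u @ v.T))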
45
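A quick numerical cross-check of the Sherman-Morrison update implemented above, written against numpy rather than the Matrix class; the 3x3 example values are illustrative assumptions, not taken from the original tests.

import numpy as np

a_inv = np.eye(3)  # inverse of A; A = I keeps the example simple
u = np.array([[1.0], [2.0], [-3.0]])
v = np.array([[4.0], [-2.0], [5.0]])

# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u)
denominator = 1.0 + (v.T @ a_inv @ u)[0, 0]
updated_inv = a_inv - (a_inv @ u) @ (v.T @ a_inv) / denominator

# Compare against inverting the rank-one update directly
direct = np.linalg.inv(np.linalg.inv(a_inv) + u @ v.T)
assert np.allclose(updated_inv, direct)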
from scipy.stats import spearmanr import datasets lowercase_ = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ lowercase_ = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ lowercase_ = R"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE (datasets.Metric ): def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def SCREAMING_SNAKE_CASE_ ( self : int , a : str , a : Any , a : str=False )-> Optional[int]: """simple docstring""" lowercase__ = spearmanr(a , a ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
45
1
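For reference, the scipy call this metric wraps can be exercised directly; the numbers mirror the docstring example above.

from scipy.stats import spearmanr

references = [1, 2, 3, 4, 5]
predictions = [10, 9, 2.5, 6, 4]

rho, pvalue = spearmanr(predictions, references)
print(round(rho, 2))     # -0.7: a clear negative monotonic relationship
print(round(pvalue, 2))  # 0.19: unsurprisingly inconclusive for five points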
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
    lowercase__ = 0
    lowercase__ = len(_SCREAMING_SNAKE_CASE ) - 1

    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        lowercase__ = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(_SCREAMING_SNAKE_CASE ):
            return None

        lowercase__ = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                lowercase__ = left
                lowercase__ = point
            elif point > right:
                lowercase__ = right
                lowercase__ = point
            else:
                if item < current_item:
                    lowercase__ = point - 1
                else:
                    lowercase__ = point + 1
    return None


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
    # avoid division by zero during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    lowercase__ = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(_SCREAMING_SNAKE_CASE ):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    elif point > right:
        return interpolation_search_by_recursion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , point - 1 )
        else:
            return interpolation_search_by_recursion(
                _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , point + 1 , _SCREAMING_SNAKE_CASE )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
    if collection != sorted(_SCREAMING_SNAKE_CASE ):
        raise ValueError('Collection must be sorted in ascending order' )
    return True


if __name__ == "__main__":
    import sys

    lowercase_ = 0
    if debug == 1:
        lowercase_ = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("""Sequence must be sorted in ascending order to apply interpolation search""")

    lowercase_ = 67
    lowercase_ = interpolation_search(collection, target)
    if result is not None:
        print(f'''{target} found at position: {result}''')
    else:
        print("""Not found""")
45
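A worked instance of the probe position computed above, extracted as a standalone sketch: instead of bisecting, the search interpolates where the item should sit between the endpoint values.

sorted_collection = [10, 30, 40, 45, 50, 66, 77, 93]
item = 67
left, right = 0, len(sorted_collection) - 1

point = left + ((item - sorted_collection[left]) * (right - left)) // (
    sorted_collection[right] - sorted_collection[left]
)
print(point, sorted_collection[point])  # 4 50: the first probe already lands right of centre, near where 67 belongs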
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int: lowercase__ = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(f'''{solution() = }''')
45
1
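The triple loop above implements the Project Euler 114 recurrence: for each admissible block length (>= 3), the inner loop counts placements with at least one free cell after the block, and the trailing `+= 1` counts the placement flush with the row's right edge. A small sanity check, assuming the problem statement's value of 17 ways for a row of length 7:

length = 7
ways_number = [1] * (length + 1)  # the all-empty row counts as one way
for row_length in range(3, length + 1):
    for block_length in range(3, row_length + 1):
        for block_start in range(row_length - block_length):
            ways_number[row_length] += ways_number[row_length - block_start - block_length - 1]
        ways_number[row_length] += 1  # block flush against the right edge
print(ways_number[length])  # 17, as stated in Project Euler problem 114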
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE : def __init__( self : Optional[Any] , a : List[str] , a : List[Any]=13 , a : str=32 , a : Tuple=3 , a : int=4 , a : Optional[Any]=[10, 20, 30, 40] , a : int=[2, 2, 3, 2] , a : Any=True , a : Tuple=True , a : int=37 , a : List[Any]="gelu" , a : str=10 , a : int=0.02 , a : List[Any]=["stage2", "stage3", "stage4"] , a : str=[2, 3, 4] , a : str=None , )-> Optional[int]: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = num_channels lowercase__ = num_stages lowercase__ = hidden_sizes lowercase__ = depths lowercase__ = is_training lowercase__ = use_labels lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = num_labels lowercase__ = initializer_range lowercase__ = out_features lowercase__ = out_indices lowercase__ = scope def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Optional[Any]: """simple docstring""" lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.num_labels ) lowercase__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]: """simple docstring""" return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[int] , a : Any , a : Union[str, Any] )-> List[str]: """simple docstring""" lowercase__ = ConvNextModel(config=a ) model.to(a ) model.eval() lowercase__ = model(a ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def SCREAMING_SNAKE_CASE_ ( self : Dict , a : Optional[Any] , a : Dict , a : List[Any] )-> List[str]: """simple docstring""" lowercase__ = ConvNextForImageClassification(a ) model.to(a ) model.eval() lowercase__ = model(a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : Optional[int] , a : List[Any] , a : Optional[int] )-> Optional[Any]: """simple docstring""" lowercase__ = ConvNextBackbone(config=a ) model.to(a ) model.eval() lowercase__ = model(a ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , 
[self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowercase__ = None lowercase__ = ConvNextBackbone(config=a ) model.to(a ) model.eval() lowercase__ = model(a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Any: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ = config_and_inputs lowercase__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : Tuple = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) _UpperCamelCase : Optional[Any] = ( {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification} if is_torch_available() else {} ) _UpperCamelCase : Dict = True _UpperCamelCase : Dict = False _UpperCamelCase : Optional[Any] = False _UpperCamelCase : Optional[Any] = False _UpperCamelCase : Union[str, Any] = False def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Tuple: """simple docstring""" lowercase__ = ConvNextModelTester(self ) lowercase__ = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Any: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE_ ( self : Any )-> Union[str, Any]: """simple docstring""" return @unittest.skip(reason='ConvNext does not use inputs_embeds' ) def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> List[str]: """simple docstring""" pass @unittest.skip(reason='ConvNext does not support input and output embeddings' ) def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any: """simple docstring""" pass @unittest.skip(reason='ConvNext does not use feedforward chunking' ) def SCREAMING_SNAKE_CASE_ ( self : int )-> Dict: """simple docstring""" pass def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(a ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Any: """simple docstring""" lowercase__ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*a ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> int: """simple docstring""" def check_hidden_states_output(a : Tuple , a : List[str] , a : List[str] ): lowercase__ = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ = model(**self._prepare_for_class(a , a ) ) lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__ = self.model_tester.num_stages self.assertEqual(len(a ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = True check_hidden_states_output(a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ = True check_hidden_states_output(a , a , a ) def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a ) @slow def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]: """simple docstring""" for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = ConvNextModel.from_pretrained(a ) self.assertIsNotNone(a ) def __UpperCamelCase () -> Optional[Any]: lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE (unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Tuple: """simple docstring""" return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]: """simple docstring""" lowercase__ = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(a ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ = model(**a ) # verify the logits lowercase__ = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , a ) lowercase__ = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) ) @require_torch class SCREAMING_SNAKE_CASE (unittest.TestCase , UpperCAmelCase ): _UpperCamelCase : Any = (ConvNextBackbone,) if is_torch_available() else () _UpperCamelCase : Optional[int] = ConvNextConfig _UpperCamelCase : Union[str, Any] = False def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]: """simple docstring""" lowercase__ = ConvNextModelTester(self )
45
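A minimal sketch of the configuration the tester above builds, using the public ConvNextConfig constructor; the values mirror the tester defaults, and it assumes a transformers version that exposes the ConvNext backbone arguments.

from transformers import ConvNextConfig

config = ConvNextConfig(
    num_channels=3,
    hidden_sizes=[10, 20, 30, 40],
    depths=[2, 2, 3, 2],
    out_features=["stage2", "stage3", "stage4"],
)
print(config.hidden_sizes[-1])  # 40: channel width of the last stage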
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) lowercase_ = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } lowercase_ = { """b0""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 224, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 240, """dropout_rate""": 0.2, """dw_padding""": [16], }, """b2""": { """hidden_dim""": 1_408, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 260, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 16], }, """b3""": { """hidden_dim""": 1_536, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 300, """dropout_rate""": 0.3, """dw_padding""": [5, 18], }, """b4""": { """hidden_dim""": 1_792, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 380, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_048, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 456, """dropout_rate""": 0.4, """dw_padding""": [13, 27], }, """b6""": { """hidden_dim""": 2_304, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 528, """dropout_rate""": 0.5, """dw_padding""": [31], }, """b7""": { """hidden_dim""": 2_560, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 600, """dropout_rate""": 0.5, """dw_padding""": [18], }, } def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str: lowercase__ = EfficientNetConfig() lowercase__ = CONFIG_MAP[model_name]['hidden_dim'] lowercase__ = CONFIG_MAP[model_name]['width_coef'] lowercase__ = CONFIG_MAP[model_name]['depth_coef'] lowercase__ = CONFIG_MAP[model_name]['image_size'] lowercase__ = CONFIG_MAP[model_name]['dropout_rate'] lowercase__ = CONFIG_MAP[model_name]['dw_padding'] lowercase__ = 'huggingface/label-files' lowercase__ = 'imagenet-1k-id2label.json' lowercase__ = 1000 lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase__ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} return config def __UpperCamelCase () -> Tuple: lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]: lowercase__ = CONFIG_MAP[model_name]['image_size'] lowercase__ = EfficientNetImageProcessor( size={'height': size, 'width': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=_SCREAMING_SNAKE_CASE , ) return preprocessor def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple: lowercase__ 
= [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )] lowercase__ = sorted(set(_SCREAMING_SNAKE_CASE ) ) lowercase__ = len(_SCREAMING_SNAKE_CASE ) lowercase__ = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE , range(_SCREAMING_SNAKE_CASE ) )} lowercase__ = [] rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') ) rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') ) rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') ) rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') ) rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') ) for b in block_names: lowercase__ = block_name_mapping[b] rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") ) rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") ) rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") ) rename_keys.append( (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") ) rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") ) rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") ) rename_keys.append( (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") ) rename_keys.append( (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") ) rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") ) rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") ) rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") ) rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") ) rename_keys.append( (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") ) rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") ) rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") ) rename_keys.append( (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") ) rename_keys.append( (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") ) rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') ) rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') ) rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') ) rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') ) rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') ) lowercase__ = {} for item in rename_keys: if item[0] in original_param_names: lowercase__ = 'efficientnet.' 
+ item[1] lowercase__ = 'classifier.weight' lowercase__ = 'classifier.bias' return key_mapping def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for key, value in tf_params.items(): if "normalization" in key: continue lowercase__ = key_mapping[key] if "_conv" in key and "kernel" in key: lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: lowercase__ = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) ) else: lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: lowercase__ = model_classes[model_name]( include_top=_SCREAMING_SNAKE_CASE , weights='imagenet' , input_tensor=_SCREAMING_SNAKE_CASE , input_shape=_SCREAMING_SNAKE_CASE , pooling=_SCREAMING_SNAKE_CASE , classes=1000 , classifier_activation='softmax' , ) lowercase__ = original_model.trainable_variables lowercase__ = original_model.non_trainable_variables lowercase__ = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: lowercase__ = param.numpy() lowercase__ = list(tf_params.keys() ) # Load HuggingFace model lowercase__ = get_efficientnet_config(_SCREAMING_SNAKE_CASE ) lowercase__ = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval() lowercase__ = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('Converting parameters...' ) lowercase__ = rename_keys(_SCREAMING_SNAKE_CASE ) replace_params(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Initialize preprocessor and preprocess input image lowercase__ = convert_image_processor(_SCREAMING_SNAKE_CASE ) lowercase__ = preprocessor(images=prepare_img() , return_tensors='pt' ) # HF model inference hf_model.eval() with torch.no_grad(): lowercase__ = hf_model(**_SCREAMING_SNAKE_CASE ) lowercase__ = outputs.logits.detach().numpy() # Original model inference lowercase__ = False lowercase__ = CONFIG_MAP[model_name]['image_size'] lowercase__ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) lowercase__ = image.img_to_array(_SCREAMING_SNAKE_CASE ) lowercase__ = np.expand_dims(_SCREAMING_SNAKE_CASE , axis=0 ) lowercase__ = original_model.predict(_SCREAMING_SNAKE_CASE ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same." print('Model outputs match!' 
) if save_model: # Create folder to save model if not os.path.isdir(_SCREAMING_SNAKE_CASE ): os.mkdir(_SCREAMING_SNAKE_CASE ) # Save converted model and image processor hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: # Push model and image processor to hub print(F"""Pushing converted {model_name} to the hub...""" ) lowercase__ = F"""efficientnet-{model_name}""" preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE ) hf_model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") lowercase_ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
45
1
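A typical invocation of the conversion script above; the flags come straight from its argparse block, while the script filename and output folder are assumptions about how the file is saved.

    python convert_efficientnet_to_pytorch.py \
        --model_name b0 \
        --pytorch_dump_folder_path hf_model \
        --save_model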
from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract lowercase_ = logging.get_logger(__name__) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: return [ int(1000 * (box[0] / width) ), int(1000 * (box[1] / height) ), int(1000 * (box[2] / width) ), int(1000 * (box[3] / height) ), ] def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: lowercase__ = to_pil_image(_SCREAMING_SNAKE_CASE ) lowercase__ , lowercase__ = pil_image.size lowercase__ = pytesseract.image_to_data(_SCREAMING_SNAKE_CASE , lang=_SCREAMING_SNAKE_CASE , output_type='dict' , config=_SCREAMING_SNAKE_CASE ) lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = data['text'], data['left'], data['top'], data['width'], data['height'] # filter empty words and corresponding coordinates lowercase__ = [idx for idx, word in enumerate(_SCREAMING_SNAKE_CASE ) if not word.strip()] lowercase__ = [word for idx, word in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices] lowercase__ = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices] lowercase__ = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices] lowercase__ = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices] lowercase__ = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format lowercase__ = [] for x, y, w, h in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowercase__ = [x, y, x + w, y + h] actual_boxes.append(_SCREAMING_SNAKE_CASE ) # finally, normalize the bounding boxes lowercase__ = [] for box in actual_boxes: normalized_boxes.append(normalize_box(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ), "Not as many words as there are bounding boxes" return words, normalized_boxes class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : List[str] = ['pixel_values'] def __init__( self : List[str] , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : float = 1 / 255 , a : bool = True , a : Union[float, Iterable[float]] = None , a : Union[float, Iterable[float]] = None , a : bool = True , a : Optional[str] = None , a : Optional[str] = "" , **a : Any , )-> None: """simple docstring""" super().__init__(**a ) lowercase__ = size if size is not None else {'height': 224, 'width': 224} lowercase__ = get_size_dict(a ) lowercase__ = do_resize lowercase__ = size lowercase__ = resample lowercase__ = do_rescale lowercase__ = rescale_value lowercase__ = do_normalize lowercase__ = image_mean if image_mean is not None else 
IMAGENET_STANDARD_MEAN lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD lowercase__ = apply_ocr lowercase__ = ocr_lang lowercase__ = tesseract_config def SCREAMING_SNAKE_CASE_ ( self : str , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BILINEAR , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , )-> np.ndarray: """simple docstring""" lowercase__ = get_size_dict(a ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" ) lowercase__ = (size['height'], size['width']) return resize(a , size=a , resample=a , data_format=a , **a ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : np.ndarray , a : Union[int, float] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , )-> np.ndarray: """simple docstring""" return rescale(a , scale=a , data_format=a , **a ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : np.ndarray , a : Union[float, Iterable[float]] , a : Union[float, Iterable[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Tuple , )-> np.ndarray: """simple docstring""" return normalize(a , mean=a , std=a , data_format=a , **a ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : List[str]=None , a : bool = None , a : float = None , a : bool = None , a : Union[float, Iterable[float]] = None , a : Union[float, Iterable[float]] = None , a : bool = None , a : Optional[str] = None , a : Optional[str] = None , a : Optional[Union[str, TensorType]] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : int , )-> PIL.Image.Image: """simple docstring""" lowercase__ = do_resize if do_resize is not None else self.do_resize lowercase__ = size if size is not None else self.size lowercase__ = get_size_dict(a ) lowercase__ = resample if resample is not None else self.resample lowercase__ = do_rescale if do_rescale is not None else self.do_rescale lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ = do_normalize if do_normalize is not None else self.do_normalize lowercase__ = image_mean if image_mean is not None else self.image_mean lowercase__ = image_std if image_std is not None else self.image_std lowercase__ = apply_ocr if apply_ocr is not None else self.apply_ocr lowercase__ = ocr_lang if ocr_lang is not None else self.ocr_lang lowercase__ = tesseract_config if tesseract_config is not None else self.tesseract_config lowercase__ = make_list_of_images(a ) if not valid_images(a ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' ) # All transformations expect numpy arrays. 
lowercase__ = [to_numpy_array(a ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , 'pytesseract' ) lowercase__ = [] lowercase__ = [] for image in images: lowercase__ , lowercase__ = apply_tesseract(a , a , a ) words_batch.append(a ) boxes_batch.append(a ) if do_resize: lowercase__ = [self.resize(image=a , size=a , resample=a ) for image in images] if do_rescale: lowercase__ = [self.rescale(image=a , scale=a ) for image in images] if do_normalize: lowercase__ = [self.normalize(image=a , mean=a , std=a ) for image in images] lowercase__ = [to_channel_dimension_format(a , a ) for image in images] lowercase__ = BatchFeature(data={'pixel_values': images} , tensor_type=a ) if apply_ocr: lowercase__ = words_batch lowercase__ = boxes_batch return data
45
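The box normalization above is worth seeing in isolation: Tesseract's pixel boxes are projected onto a fixed 0-1000 grid, which makes them resolution independent. A self-contained copy of the same arithmetic:

def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

print(normalize_box([50, 100, 150, 200], width=500, height=1000))
# [100, 100, 300, 200], whatever the source image resolution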
import argparse import json import subprocess def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: lowercase__ = [] lowercase__ = ( F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\"""" ' https://api.github.com/repos/huggingface/transformers/actions/runners' ) lowercase__ = subprocess.run(_SCREAMING_SNAKE_CASE , shell=_SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE ) lowercase__ = output.stdout.decode('utf-8' ) lowercase__ = json.loads(_SCREAMING_SNAKE_CASE ) lowercase__ = status['runners'] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(_SCREAMING_SNAKE_CASE ) # save the result so we can report them on Slack with open('offline_runners.txt' , 'w' ) as fp: fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) ) if len(_SCREAMING_SNAKE_CASE ) > 0: lowercase__ = '\n'.join([x['name'] for x in offline_runners] ) raise ValueError(F"""The following runners are offline:\n{failed}""" ) if __name__ == "__main__": def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str: return values.split(',' ) lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--target_runners""", default=None, type=list_str, required=True, help="""Comma-separated list of runners to check status.""", ) parser.add_argument( """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission.""" ) lowercase_ = parser.parse_args() get_runner_status(args.target_runners, args.token)
45
1
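The script above is driven from the command line; its `list_str` helper splits the comma-separated runner names. A sketch with placeholder runner names, token, and script filename:

def list_str(values):
    return values.split(',')

print(list_str('runner-1,runner-2'))  # ['runner-1', 'runner-2']

# python check_runner_status.py --target_runners runner-1,runner-2 --token <GITHUB_TOKEN>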
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""", # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Dict = 'vit_msn' def __init__( self : Union[str, Any] , a : List[Any]=768 , a : Optional[Any]=12 , a : Optional[int]=12 , a : str=3_072 , a : Tuple="gelu" , a : Dict=0.0 , a : Dict=0.0 , a : Optional[Any]=0.02 , a : Any=1E-0_6 , a : List[Any]=224 , a : Union[str, Any]=16 , a : Tuple=3 , a : List[Any]=True , **a : Any , )-> Tuple: """simple docstring""" super().__init__(**a ) lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = qkv_bias
45
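Instantiating the configuration defined above with its documented defaults; a sketch that assumes a transformers release shipping ViT-MSN.

from transformers import ViTMSNConfig

config = ViTMSNConfig()  # defaults mirror sayakpaul/vit-msn-base
print(config.hidden_size, config.num_hidden_layers, config.patch_size)  # 768 12 16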
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Tuple = 'ClapFeatureExtractor' _UpperCamelCase : Union[str, Any] = ('RobertaTokenizer', 'RobertaTokenizerFast') def __init__( self : List[Any] , a : int , a : str )-> Any: """simple docstring""" super().__init__(a , a ) def __call__( self : Any , a : Tuple=None , a : Optional[int]=None , a : int=None , **a : Optional[int] )-> Union[str, Any]: """simple docstring""" lowercase__ = kwargs.pop('sampling_rate' , a ) if text is None and audios is None: raise ValueError('You have to specify either text or audios. Both cannot be none.' ) if text is not None: lowercase__ = self.tokenizer(a , return_tensors=a , **a ) if audios is not None: lowercase__ = self.feature_extractor( a , sampling_rate=a , return_tensors=a , **a ) if text is not None and audios is not None: lowercase__ = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a ) , tensor_type=a ) def SCREAMING_SNAKE_CASE_ ( self : str , *a : Dict , **a : int )-> Optional[int]: """simple docstring""" return self.tokenizer.batch_decode(*a , **a ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , *a : int , **a : Dict )-> Dict: """simple docstring""" return self.tokenizer.decode(*a , **a ) @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]: """simple docstring""" lowercase__ = self.tokenizer.model_input_names lowercase__ = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
45
1
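A hedged usage sketch for the processor above. The checkpoint id is an assumption (a public CLAP checkpoint), and the first call downloads the tokenizer and feature-extractor files.

import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained('laion/clap-htsat-unfused')
audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz
inputs = processor(text=['a dog barking'], audios=audio, sampling_rate=48_000, return_tensors='pt')
print(sorted(inputs.keys()))  # text ids and mask, plus the audio input_features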
import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor lowercase_ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE (UpperCAmelCase ): def __init__( self : Optional[Any] , *a : Optional[int] , **a : List[Any] )-> None: """simple docstring""" warnings.warn( 'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use FlavaImageProcessor instead.' , a , ) super().__init__(*a , **a )
45
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: lowercase_ = None lowercase_ = logging.get_logger(__name__) lowercase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowercase_ = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json""" ), }, } lowercase_ = { """moussaKam/mbarthez""": 1_024, """moussaKam/barthez""": 1_024, """moussaKam/barthez-orangesum-title""": 1_024, } lowercase_ = """▁""" class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Dict = VOCAB_FILES_NAMES _UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Optional[Any] = ['input_ids', 'attention_mask'] _UpperCamelCase : int = BarthezTokenizer def __init__( self : List[Any] , a : Union[str, Any]=None , a : Optional[Any]=None , a : Dict="<s>" , a : Union[str, Any]="</s>" , a : List[str]="</s>" , a : Optional[Any]="<s>" , a : int="<unk>" , a : str="<pad>" , a : Optional[int]="<mask>" , **a : Union[str, Any] , )-> Tuple: """simple docstring""" lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token super().__init__( a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , ) lowercase__ = vocab_file lowercase__ = False if not self.vocab_file else True def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase__ = [self.cls_token_id] lowercase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : Optional[str] = None )-> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' 
) if not os.path.isdir(a ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ = os.path.join( a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a ): copyfile(self.vocab_file , a ) return (out_vocab_file,)
45
1
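The sequence-pair layout built by the tokenizer above follows the CamemBERT convention of a doubled separator between segments; with placeholder ids the arithmetic looks like this:

cls_id, sep_id = 0, 2  # illustrative ids, not read from a real vocabulary
token_ids_a = [10, 11]
token_ids_b = [20, 21]

pair = [cls_id] + token_ids_a + [sep_id, sep_id] + token_ids_b + [sep_id]
print(pair)  # [0, 10, 11, 2, 2, 20, 21, 2] -> <s> A </s></s> B </s>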
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: lowercase__ = XCLIPTextConfig() # derive patch size from model name lowercase__ = model_name.find('patch' ) lowercase__ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] ) lowercase__ = XCLIPVisionConfig(patch_size=_SCREAMING_SNAKE_CASE , num_frames=_SCREAMING_SNAKE_CASE ) if "large" in model_name: lowercase__ = 768 lowercase__ = 3072 lowercase__ = 12 lowercase__ = 1024 lowercase__ = 4096 lowercase__ = 16 lowercase__ = 24 lowercase__ = 768 lowercase__ = 3072 if model_name == "xclip-large-patch14-16-frames": lowercase__ = 336 lowercase__ = XCLIPConfig.from_text_vision_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if "large" in model_name: lowercase__ = 768 return config def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: # text encoder if name == "token_embedding.weight": lowercase__ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' ) if name == "positional_embedding": lowercase__ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "ln_1" in name: lowercase__ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: lowercase__ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: lowercase__ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: lowercase__ = name.replace('c_proj' , 'fc2' ) if name.startswith('transformer.resblocks' ): lowercase__ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' ) if "attn.out_proj" in name and "message" not in name: lowercase__ = name.replace('attn.out_proj' , 'self_attn.out_proj' ) if "ln_final" in name: lowercase__ = name.replace('ln_final' , 'text_model.final_layer_norm' ) # visual encoder if name == "visual.class_embedding": lowercase__ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' ) if name == "visual.positional_embedding": lowercase__ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' ) if name.startswith('visual.transformer.resblocks' ): lowercase__ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' ) if "visual.conv1" in name: lowercase__ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' ) if "visual.ln_pre" in name: lowercase__ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' ) if "visual.ln_post" in name: lowercase__ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' ) if "visual.proj" in name: lowercase__ = name.replace('visual.proj' , 'visual_projection.weight' ) if "text_projection" in name: lowercase__ = name.replace('text_projection' , 'text_projection.weight' ) # things on top if "prompts_visual_proj" in name: lowercase__ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' ) if "prompts_visual_ln" in name: lowercase__ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' ) # mit if name == "mit.positional_embedding": lowercase__ = name.replace('positional' , 'position' ) if name.startswith('mit.resblocks' ): lowercase__ = name.replace('mit.resblocks' , 'mit.encoder.layers' ) # prompts generator if name.startswith('prompts_generator.norm' ): lowercase__ = 
name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' ) return name def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: for key in orig_state_dict.copy().keys(): lowercase__ = orig_state_dict.pop(_SCREAMING_SNAKE_CASE ) if "attn.in_proj" in key: lowercase__ = key.split('.' ) if key.startswith('visual' ): lowercase__ = key_split[3] lowercase__ = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: lowercase__ = val[ :dim, : ] lowercase__ = val[ dim : dim * 2, : ] lowercase__ = val[ -dim:, : ] else: lowercase__ = val[ :dim ] lowercase__ = val[ dim : dim * 2 ] lowercase__ = val[ -dim: ] else: if "weight" in key: lowercase__ = val[ :dim, : ] lowercase__ = val[ dim : dim * 2, : ] lowercase__ = val[ -dim:, : ] else: lowercase__ = val[:dim] lowercase__ = val[ dim : dim * 2 ] lowercase__ = val[-dim:] elif key.startswith('mit' ): lowercase__ = key_split[2] lowercase__ = config.vision_config.mit_hidden_size if "weight" in key: lowercase__ = val[:dim, :] lowercase__ = val[dim : dim * 2, :] lowercase__ = val[-dim:, :] else: lowercase__ = val[:dim] lowercase__ = val[dim : dim * 2] lowercase__ = val[-dim:] else: lowercase__ = key_split[2] lowercase__ = config.text_config.hidden_size if "weight" in key: lowercase__ = val[:dim, :] lowercase__ = val[ dim : dim * 2, : ] lowercase__ = val[-dim:, :] else: lowercase__ = val[:dim] lowercase__ = val[ dim : dim * 2 ] lowercase__ = val[-dim:] else: lowercase__ = rename_key(_SCREAMING_SNAKE_CASE ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: lowercase__ = val.T lowercase__ = val return orig_state_dict def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str: if num_frames == 8: lowercase__ = 'eating_spaghetti_8_frames.npy' elif num_frames == 16: lowercase__ = 'eating_spaghetti.npy' elif num_frames == 32: lowercase__ = 'eating_spaghetti_32_frames.npy' lowercase__ = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename=_SCREAMING_SNAKE_CASE , repo_type='dataset' , ) lowercase__ = np.load(_SCREAMING_SNAKE_CASE ) return list(_SCREAMING_SNAKE_CASE ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) -> List[str]: lowercase__ = { # fully supervised kinetics-400 checkpoints 'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth', 'xclip-base-patch32-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth' ), 'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth', 'xclip-base-patch16-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth' ), 'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb', 'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f', # fully supervised kinetics-600 checkpoints 'xclip-base-patch16-kinetics-600': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth' ), 'xclip-base-patch16-kinetics-600-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth' ), 'xclip-large-patch14-kinetics-600': 
'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be', # few shot 'xclip-base-patch16-hmdb-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth' ), 'xclip-base-patch16-hmdb-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth' ), 'xclip-base-patch16-hmdb-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth' ), 'xclip-base-patch16-hmdb-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth' ), 'xclip-base-patch16-ucf-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth' ), 'xclip-base-patch16-ucf-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth' ), 'xclip-base-patch16-ucf-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth' ), 'xclip-base-patch16-ucf-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth' ), # zero shot 'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth', } lowercase__ = model_to_url[model_name] lowercase__ = 8 if "16-frames" in model_name: lowercase__ = 16 elif "shot" in model_name: lowercase__ = 32 lowercase__ = get_xclip_config(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowercase__ = XCLIPModel(_SCREAMING_SNAKE_CASE ) model.eval() if "drive" in checkpoint_url: lowercase__ = 'pytorch_model.bin' gdown.cached_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , quiet=_SCREAMING_SNAKE_CASE ) lowercase__ = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] else: lowercase__ = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE )['model'] lowercase__ = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowercase__ = XCLIPModel(_SCREAMING_SNAKE_CASE ) lowercase__ , lowercase__ = model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() lowercase__ = 336 if model_name == 'xclip-large-patch14-16-frames' else 224 lowercase__ = VideoMAEImageProcessor(size=_SCREAMING_SNAKE_CASE ) lowercase__ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' ) lowercase__ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' ) lowercase__ = XCLIPProcessor(image_processor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE ) lowercase__ = prepare_video(_SCREAMING_SNAKE_CASE ) lowercase__ = processor( text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=_SCREAMING_SNAKE_CASE , return_tensors='pt' , padding=_SCREAMING_SNAKE_CASE ) print('Shape of pixel values:' , inputs.pixel_values.shape ) with torch.no_grad(): lowercase__ = model(**_SCREAMING_SNAKE_CASE ) # Verify outputs lowercase__ = outputs.logits_per_video lowercase__ = logits_per_video.softmax(dim=1 ) print('Probs:' , _SCREAMING_SNAKE_CASE ) # kinetics-400 if model_name == "xclip-base-patch32": lowercase__ = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] ) elif model_name == "xclip-base-patch32-16-frames": lowercase__ = torch.tensor([[7.0_999E-04, 9.9_883E-01, 4.5_580E-04]] ) elif model_name == "xclip-base-patch16": lowercase__ = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] ) elif model_name == "xclip-base-patch16-16-frames": lowercase__ = 
torch.tensor([[7.6_937E-04, 9.9_728E-01, 1.9_473E-03]] ) elif model_name == "xclip-large-patch14": lowercase__ = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] ) elif model_name == "xclip-large-patch14-16-frames": lowercase__ = torch.tensor([[3.3_877E-04, 9.9_937E-01, 2.8_888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": lowercase__ = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": lowercase__ = torch.tensor([[3.8_554E-04, 9.9_929E-01, 3.2_754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": lowercase__ = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": lowercase__ = torch.tensor([[7.1_890E-06, 9.9_994E-01, 5.6_559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": lowercase__ = torch.tensor([[1.0_320E-05, 9.9_993E-01, 6.2_435E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": lowercase__ = torch.tensor([[4.1_377E-06, 9.9_990E-01, 9.8_386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": lowercase__ = torch.tensor([[4.1_347E-05, 9.9_962E-01, 3.3_411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": lowercase__ = torch.tensor([[8.5_857E-05, 9.9_928E-01, 6.3_291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": lowercase__ = torch.tensor([[8.5_857E-05, 9.9_928E-01, 6.3_291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": lowercase__ = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": lowercase__ = torch.tensor([[9.8_219E-04, 9.9_593E-01, 3.0_863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": lowercase__ = torch.tensor([[3.5_082E-04, 9.9_785E-01, 1.7_966E-03]] ) else: raise ValueError(F"""Model name {model_name} not supported""" ) assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: print('Pushing model, processor and slow tokenizer files to the hub...' ) model.push_to_hub(_SCREAMING_SNAKE_CASE , organization='nielsr' ) processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization='nielsr' ) slow_tokenizer.push_to_hub(_SCREAMING_SNAKE_CASE , organization='nielsr' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
45
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : List[Any] = StableDiffusionSAGPipeline _UpperCamelCase : str = TEXT_TO_IMAGE_PARAMS _UpperCamelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCamelCase : Union[str, Any] = False def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict: """simple docstring""" torch.manual_seed(0 ) lowercase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) lowercase__ = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , ) torch.manual_seed(0 ) lowercase__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) lowercase__ = CLIPTextModel(a ) lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowercase__ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[int] , a : Any=0 )-> Union[str, Any]: """simple docstring""" if str(a ).startswith('mps' ): lowercase__ = torch.manual_seed(a ) else: lowercase__ = torch.Generator(device=a ).manual_seed(a ) lowercase__ = { 'prompt': '.', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 1.0, 'sag_scale': 1.0, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class SCREAMING_SNAKE_CASE (unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self : str )-> int: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]: """simple docstring""" lowercase__ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ) lowercase__ = sag_pipe.to(a ) sag_pipe.set_progress_bar_config(disable=a ) lowercase__ = '.' 
lowercase__ = torch.manual_seed(0 ) lowercase__ = sag_pipe( [prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' ) lowercase__ = output.images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]: """simple docstring""" lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) lowercase__ = sag_pipe.to(a ) sag_pipe.set_progress_bar_config(disable=a ) lowercase__ = '.' lowercase__ = torch.manual_seed(0 ) lowercase__ = sag_pipe( [prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' ) lowercase__ = output.images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]: """simple docstring""" lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) lowercase__ = sag_pipe.to(a ) sag_pipe.set_progress_bar_config(disable=a ) lowercase__ = '.' lowercase__ = torch.manual_seed(0 ) lowercase__ = sag_pipe( [prompt] , width=768 , height=512 , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , ) lowercase__ = output.images assert image.shape == (1, 512, 768, 3)
45
1
from datetime import datetime import requests def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bytes: lowercase__ = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url=' lowercase__ = requests.get(base_url + url ).json()[0]['urls'][0]['src'] return requests.get(_SCREAMING_SNAKE_CASE ).content if __name__ == "__main__": lowercase_ = input("""Enter Video/IGTV url: """).strip() lowercase_ = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4''' with open(file_name, """wb""") as fp: fp.write(download_video(url)) print(f'''Done. Video saved to disk as {file_name}.''')
45
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/deit-base-distilled-patch16-224""": ( """https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json""" ), # See all DeiT models at https://huggingface.co/models?filter=deit } class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Any = 'deit' def __init__( self : Any , a : Union[str, Any]=768 , a : Optional[Any]=12 , a : Union[str, Any]=12 , a : Optional[int]=3_072 , a : Optional[int]="gelu" , a : Optional[Any]=0.0 , a : List[Any]=0.0 , a : int=0.02 , a : List[str]=1E-1_2 , a : Optional[int]=224 , a : Tuple=16 , a : List[Any]=3 , a : List[str]=True , a : Any=16 , **a : Union[str, Any] , )-> int: """simple docstring""" super().__init__(**a ) lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = qkv_bias lowercase__ = encoder_stride class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : List[Any] = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE_ ( self : int )-> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def SCREAMING_SNAKE_CASE_ ( self : Any )-> float: """simple docstring""" return 1E-4
45
1
from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "x" , _SCREAMING_SNAKE_CASE = 10**-10 , _SCREAMING_SNAKE_CASE = 1 , ) -> complex: lowercase__ = symbols(_SCREAMING_SNAKE_CASE ) lowercase__ = lambdify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowercase__ = lambdify(_SCREAMING_SNAKE_CASE , diff(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) lowercase__ = starting_point while True: if diff_function(_SCREAMING_SNAKE_CASE ) != 0: lowercase__ = prev_guess - multiplicity * func(_SCREAMING_SNAKE_CASE ) / diff_function( _SCREAMING_SNAKE_CASE ) else: raise ZeroDivisionError('Could not find root' ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess lowercase__ = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''') # Find root of polynomial # Find fourth Root of 5 print(f'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}''') # Find value of e print( """The root of log(y) - 1 = 0 is """, f'''{newton_raphson("log(y) - 1", 2, variable="y")}''', ) # Exponential Roots print( """The root of exp(x) - 1 = 0 is""", f'''{newton_raphson("exp(x) - 1", 10, precision=0.005)}''', ) # Find root of cos(x) print(f'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
45
import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 ) -> List[Any]: lowercase__ = None if token is not None: lowercase__ = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""} # The id of a workflow (not of a workflow run) lowercase__ = '636036' lowercase__ = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs""" # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}""" lowercase__ = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json() return result["workflow_runs"] def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: lowercase__ = get_daily_ci_runs(_SCREAMING_SNAKE_CASE ) lowercase__ = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": lowercase__ = workflow_run['id'] break return workflow_run_id def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: lowercase__ = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE ) if workflow_run_id is not None: lowercase__ = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE ) for artifact_name in artifact_names: if artifact_name in artifacts_links: lowercase__ = artifacts_links[artifact_name] download_artifact( artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowercase__ = {} for artifact_name in artifact_names: lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" ) if os.path.isfile(_SCREAMING_SNAKE_CASE ): lowercase__ = {} with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z: for filename in z.namelist(): if not os.path.isdir(_SCREAMING_SNAKE_CASE ): # read the file with z.open(_SCREAMING_SNAKE_CASE ) as f: lowercase__ = f.read().decode('UTF-8' ) return results
45
1
from typing import List, Optional, Union import numpy as np import torch import torchaudio.compliance.kaldi as ta_kaldi from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase_ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Optional[Any] = ['input_features', 'attention_mask'] def __init__( self : Tuple , a : List[str]=80 , a : List[Any]=16_000 , a : Any=80 , a : str=0.0 , a : List[Any]=True , a : List[str]=True , a : int=True , **a : str , )-> int: """simple docstring""" super().__init__(feature_size=a , sampling_rate=a , padding_value=a , **a ) lowercase__ = num_mel_bins lowercase__ = do_ceptral_normalize lowercase__ = normalize_means lowercase__ = normalize_vars lowercase__ = True def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : np.ndarray , )-> np.ndarray: """simple docstring""" lowercase__ = waveform * (2**15) # Kaldi compliance: 16-bit signed integers lowercase__ = torch.from_numpy(a ).unsqueeze(0 ) lowercase__ = ta_kaldi.fbank(a , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate ) return features.numpy() @staticmethod def SCREAMING_SNAKE_CASE_ ( a : np.ndarray , a : int , a : Optional[bool] = True , a : Optional[bool] = True , a : float = 0.0 , )-> np.ndarray: """simple docstring""" if normalize_means: lowercase__ = x[:input_length].mean(axis=0 ) lowercase__ = np.subtract(a , a ) if normalize_vars: lowercase__ = x[:input_length].std(axis=0 ) lowercase__ = np.divide(a , a ) if input_length < x.shape[0]: lowercase__ = padding_value # make sure array is in float32 lowercase__ = x.astype(np.floataa ) return x def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : List[np.ndarray] , a : Optional[np.ndarray] = None )-> List[np.ndarray]: """simple docstring""" lowercase__ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features] return [ self.utterance_cmvn(a , a , self.normalize_means , self.normalize_vars , self.padding_value ) for x, n in zip(a , a ) ] def __call__( self : Tuple , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : Union[bool, str, PaddingStrategy] = False , a : Optional[int] = None , a : bool = False , a : Optional[int] = None , a : Optional[Union[str, TensorType]] = None , a : Optional[int] = None , a : Optional[bool] = None , **a : List[Any] , )-> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with""" f""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) lowercase__ = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) lowercase__ = is_batched_numpy or ( isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowercase__ = [np.asarray(a , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(a , np.ndarray ): lowercase__ = np.asarray(a , dtype=np.floataa ) elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase__ = [raw_speech] # extract fbank features lowercase__ = [self._extract_fbank_features(a ) for waveform in raw_speech] # convert into correct format for padding lowercase__ = BatchFeature({'input_features': features} ) lowercase__ = self.pad( a , padding=a , max_length=a , truncation=a , pad_to_multiple_of=a , return_attention_mask=a , **a , ) # make sure list is in array format lowercase__ = padded_inputs.get('input_features' ) if isinstance(input_features[0] , a ): lowercase__ = [np.asarray(a , dtype=np.floataa ) for feature in input_features] lowercase__ = padded_inputs.get('attention_mask' ) if attention_mask is not None: lowercase__ = [np.asarray(a , dtype=np.intaa ) for array in attention_mask] # Utterance-level cepstral mean and variance normalization if self.do_ceptral_normalize: lowercase__ = ( np.array(a , dtype=np.intaa ) if self._get_padding_strategies(a , max_length=a ) is not PaddingStrategy.DO_NOT_PAD else None ) lowercase__ = self.normalize( padded_inputs['input_features'] , attention_mask=a ) if return_tensors is not None: lowercase__ = padded_inputs.convert_to_tensors(a ) return padded_inputs
45
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device lowercase_ = False class SCREAMING_SNAKE_CASE (unittest.TestCase ): pass @nightly @require_torch_gpu class SCREAMING_SNAKE_CASE (unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]: """simple docstring""" lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowercase__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) lowercase__ = torch.manual_seed(0 ) lowercase__ = pipe.dual_guided( prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(a ) lowercase__ = VersatileDiffusionPipeline.from_pretrained(a , torch_dtype=torch.floataa ) pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowercase__ = generator.manual_seed(0 ) lowercase__ = pipe.dual_guided( prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]: """simple docstring""" lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowercase__ = 'cyberpunk 2077' lowercase__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) lowercase__ = torch.manual_seed(0 ) lowercase__ = pipe.dual_guided( prompt=a , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images lowercase__ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowercase__ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 lowercase__ = 'A painting of a squirrel eating a burger ' lowercase__ = torch.manual_seed(0 ) lowercase__ = pipe.text_to_image( prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images lowercase__ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowercase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 lowercase__ = pipe.image_variation(a , generator=a , output_type='numpy' ).images lowercase__ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowercase__ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
45
1
from __future__ import annotations def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[int]: lowercase__ = 0 lowercase__ = len(_SCREAMING_SNAKE_CASE ) - 1 while i < j: if nums[i] + nums[j] == target: return [i, j] elif nums[i] + nums[j] < target: lowercase__ = i + 1 else: lowercase__ = j - 1 return [] if __name__ == "__main__": import doctest doctest.testmod() print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
45
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bool: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError('Input series is not valid, valid series - [2, 4, 6]' ) if len(_SCREAMING_SNAKE_CASE ) == 0: raise ValueError('Input list must be a non empty list' ) if len(_SCREAMING_SNAKE_CASE ) == 1: return True lowercase__ = series[1] - series[0] for index in range(len(_SCREAMING_SNAKE_CASE ) - 1 ): if series[index + 1] - series[index] != common_diff: return False return True def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> float: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError('Input series is not valid, valid series - [2, 4, 6]' ) if len(_SCREAMING_SNAKE_CASE ) == 0: raise ValueError('Input list must be a non empty list' ) lowercase__ = 0 for val in series: answer += val return answer / len(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
45
1
from __future__ import annotations from collections.abc import Callable def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 100 , ) -> float: lowercase__ = x_start lowercase__ = fnc(_SCREAMING_SNAKE_CASE ) lowercase__ = 0.0 for _ in range(_SCREAMING_SNAKE_CASE ): # Approximates small segments of curve as linear and solve # for trapezoidal area lowercase__ = (x_end - x_start) / steps + xa lowercase__ = fnc(_SCREAMING_SNAKE_CASE ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step lowercase__ = xa lowercase__ = fxa return area if __name__ == "__main__": def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: return x**3 + x**2 print("""f(x) = x^3 + x^2""") print("""The area between the curve, x = -5, x = 5 and the x axis is:""") lowercase_ = 10 while i <= 100_000: print(f'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''') i *= 10
45
from __future__ import annotations import math from collections.abc import Callable def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 100 , ) -> float: lowercase__ = x_start lowercase__ = fnc(_SCREAMING_SNAKE_CASE ) lowercase__ = 0.0 for _ in range(_SCREAMING_SNAKE_CASE ): # Approximates curve as a sequence of linear lines and sums their length lowercase__ = (x_end - x_start) / steps + xa lowercase__ = fnc(_SCREAMING_SNAKE_CASE ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step lowercase__ = xa lowercase__ = fxa return length if __name__ == "__main__": def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str: return math.sin(10 * x ) print("""f(x) = sin(10 * x)""") print("""The length of the curve from x = -10 to x = 10 is:""") lowercase_ = 10 while i <= 100_000: print(f'''With {i} steps: {line_length(f, -10, 10, i)}''') i *= 10
45
1
from typing import Optional from urllib.parse import quote import huggingface_hub as hfh from packaging import version def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> str: if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release: # old versions of hfh don't url-encode the file path lowercase__ = quote(_SCREAMING_SNAKE_CASE ) return hfh.hf_hub_url(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' , revision=_SCREAMING_SNAKE_CASE )
45
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase_ = { """configuration_squeezebert""": [ """SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SqueezeBertConfig""", """SqueezeBertOnnxConfig""", ], """tokenization_squeezebert""": ["""SqueezeBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""SqueezeBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """SqueezeBertForMaskedLM""", """SqueezeBertForMultipleChoice""", """SqueezeBertForQuestionAnswering""", """SqueezeBertForSequenceClassification""", """SqueezeBertForTokenClassification""", """SqueezeBertModel""", """SqueezeBertModule""", """SqueezeBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
45
1
import os def __UpperCamelCase (_SCREAMING_SNAKE_CASE = "input.txt" ) -> int: with open(os.path.join(os.path.dirname(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ) as input_file: lowercase__ = [ [int(_SCREAMING_SNAKE_CASE ) for element in line.split(',' )] for line in input_file.readlines() ] lowercase__ = len(_SCREAMING_SNAKE_CASE ) lowercase__ = len(matrix[0] ) lowercase__ = [[-1 for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(_SCREAMING_SNAKE_CASE )] for i in range(_SCREAMING_SNAKE_CASE ): lowercase__ = matrix[i][0] for j in range(1 , _SCREAMING_SNAKE_CASE ): for i in range(_SCREAMING_SNAKE_CASE ): lowercase__ = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , _SCREAMING_SNAKE_CASE ): lowercase__ = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): lowercase__ = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(f'''{solution() = }''')
45
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int: lowercase__ = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f'''{solution() = }''')
45
1
from string import ascii_uppercase lowercase_ = {str(ord(c) - 55): c for c in ascii_uppercase} def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('int() can\'t convert non-string with explicit base' ) if num < 0: raise ValueError('parameter must be positive int' ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('\'str\' object cannot be interpreted as an integer' ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('\'float\' object cannot be interpreted as an integer' ) if base in (0, 1): raise ValueError('base must be >= 2' ) if base > 36: raise ValueError('base must be <= 36' ) lowercase__ = '' lowercase__ = 0 lowercase__ = 0 while div != 1: lowercase__ , lowercase__ = divmod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if base >= 11 and 9 < mod < 36: lowercase__ = ALPHABET_VALUES[str(_SCREAMING_SNAKE_CASE )] else: lowercase__ = str(_SCREAMING_SNAKE_CASE ) new_value += actual_value lowercase__ = num // base lowercase__ = div if div == 0: return str(new_value[::-1] ) elif div == 1: new_value += str(_SCREAMING_SNAKE_CASE ) return str(new_value[::-1] ) return new_value[::-1] if __name__ == "__main__": import doctest doctest.testmod() for base in range(2, 37): for num in range(1_000): assert int(decimal_to_any(num, base), base) == num, ( num, base, decimal_to_any(num, base), int(decimal_to_any(num, base), base), )
45
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class SCREAMING_SNAKE_CASE (UpperCAmelCase ): def __init__( self : Optional[Any] , a : UNetaDModel , a : UNetaDModel , a : DDPMScheduler , a : Any , )-> Dict: """simple docstring""" super().__init__() lowercase__ = value_function lowercase__ = unet lowercase__ = scheduler lowercase__ = env lowercase__ = env.get_dataset() lowercase__ = {} for key in self.data.keys(): try: lowercase__ = self.data[key].mean() except: # noqa: E722 pass lowercase__ = {} for key in self.data.keys(): try: lowercase__ = self.data[key].std() except: # noqa: E722 pass lowercase__ = env.observation_space.shape[0] lowercase__ = env.action_space.shape[0] def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Any , a : int )-> Dict: """simple docstring""" return (x_in - self.means[key]) / self.stds[key] def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : List[str] )-> str: """simple docstring""" return x_in * self.stds[key] + self.means[key] def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Tuple )-> Tuple: """simple docstring""" if type(a ) is dict: return {k: self.to_torch(a ) for k, v in x_in.items()} elif torch.is_tensor(a ): return x_in.to(self.unet.device ) return torch.tensor(a , device=self.unet.device ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Optional[int] , a : Dict , a : Optional[Any] )-> List[Any]: """simple docstring""" for key, val in cond.items(): lowercase__ = val.clone() return x_in def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[Any] , a : Any , a : Optional[Any] , a : Optional[int] )-> List[Any]: """simple docstring""" lowercase__ = x.shape[0] lowercase__ = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model lowercase__ = torch.full((batch_size,) , a , device=self.unet.device , dtype=torch.long ) for _ in range(a ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models lowercase__ = self.value_function(x.permute(0 , 2 , 1 ) , a ).sample lowercase__ = torch.autograd.grad([y.sum()] , [x] )[0] lowercase__ = self.scheduler._get_variance(a ) lowercase__ = torch.exp(0.5 * posterior_variance ) lowercase__ = model_std * grad lowercase__ = 0 lowercase__ = x.detach() lowercase__ = x + scale * grad lowercase__ = self.reset_xa(a , a , self.action_dim ) lowercase__ = self.unet(x.permute(0 , 2 , 1 ) , a ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg lowercase__ = self.scheduler.step(a , a , a , predict_epsilon=a )['prev_sample'] # apply conditions to the trajectory (set the initial state) lowercase__ = self.reset_xa(a , a , self.action_dim ) lowercase__ = self.to_torch(a ) return x, y def __call__( self : Any , a : Tuple , a : int=64 , a : Tuple=32 , a : List[Any]=2 , a : List[str]=0.1 )-> List[Any]: """simple docstring""" lowercase__ = self.normalize(a , 'observations' ) lowercase__ = obs[None].repeat(a , axis=0 ) lowercase__ = {0: self.to_torch(a )} lowercase__ = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) lowercase__ = randn_tensor(a , device=self.unet.device ) lowercase__ = self.reset_xa(a , a , self.action_dim ) lowercase__ = self.to_torch(a ) # run the diffusion process lowercase__ , lowercase__ = self.run_diffusion(a , a , a , a ) 
# sort output trajectories by value lowercase__ = y.argsort(0 , descending=a ).squeeze() lowercase__ = x[sorted_idx] lowercase__ = sorted_values[:, :, : self.action_dim] lowercase__ = actions.detach().cpu().numpy() lowercase__ = self.de_normalize(a , key='actions' ) # select the action with the highest value if y is not None: lowercase__ = 0 else: # if we didn't run value guiding, select a random action lowercase__ = np.random.randint(0 , a ) lowercase__ = denorm_actions[selected_index, 0] return denorm_actions
45
1
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( DiffusionPipeline, UnCLIPImageVariationPipeline, UnCLIPScheduler, UNetaDConditionModel, UNetaDModel, ) from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils import floats_tensor, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : Tuple = UnCLIPImageVariationPipeline _UpperCamelCase : List[Any] = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'} _UpperCamelCase : Any = IMAGE_VARIATION_BATCH_PARAMS _UpperCamelCase : Dict = [ 'generator', 'return_dict', 'decoder_num_inference_steps', 'super_res_num_inference_steps', ] _UpperCamelCase : List[str] = False @property def SCREAMING_SNAKE_CASE_ ( self : str )-> Union[str, Any]: """simple docstring""" return 32 @property def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Union[str, Any]: """simple docstring""" return 32 @property def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict: """simple docstring""" return self.time_input_dim @property def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[Any]: """simple docstring""" return self.time_input_dim * 4 @property def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict: """simple docstring""" return 100 @property def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Tuple: """simple docstring""" lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[Any]: """simple docstring""" torch.manual_seed(0 ) lowercase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModelWithProjection(a ) @property def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Dict: """simple docstring""" torch.manual_seed(0 ) lowercase__ = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) return CLIPVisionModelWithProjection(a ) @property def SCREAMING_SNAKE_CASE_ ( self : Dict )-> str: """simple docstring""" torch.manual_seed(0 ) lowercase__ = { 'clip_embeddings_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'cross_attention_dim': self.cross_attention_dim, } lowercase__ = UnCLIPTextProjModel(**a ) return model @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> List[Any]: """simple docstring""" torch.manual_seed(0 ) lowercase__ = { 'sample_size': 32, # RGB in channels 'in_channels': 3, # Out channels is double in channels because predicts mean and variance 'out_channels': 6, 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 
'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': 'identity', } lowercase__ = UNetaDConditionModel(**a ) return model @property def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Tuple: """simple docstring""" return { "sample_size": 64, "layers_per_block": 1, "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "in_channels": 6, "out_channels": 3, } @property def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> str: """simple docstring""" torch.manual_seed(0 ) lowercase__ = UNetaDModel(**self.dummy_super_res_kwargs ) return model @property def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> List[Any]: """simple docstring""" torch.manual_seed(1 ) lowercase__ = UNetaDModel(**self.dummy_super_res_kwargs ) return model def SCREAMING_SNAKE_CASE_ ( self : Any )-> Tuple: """simple docstring""" lowercase__ = self.dummy_decoder lowercase__ = self.dummy_text_proj lowercase__ = self.dummy_text_encoder lowercase__ = self.dummy_tokenizer lowercase__ = self.dummy_super_res_first lowercase__ = self.dummy_super_res_last lowercase__ = UnCLIPScheduler( variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1_000 , ) lowercase__ = UnCLIPScheduler( variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1_000 , ) lowercase__ = CLIPImageProcessor(crop_size=32 , size=32 ) lowercase__ = self.dummy_image_encoder return { "decoder": decoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_proj": text_proj, "feature_extractor": feature_extractor, "image_encoder": image_encoder, "super_res_first": super_res_first, "super_res_last": super_res_last, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } def SCREAMING_SNAKE_CASE_ ( self : str , a : Any , a : List[str]=0 , a : List[Any]=True )-> Optional[int]: """simple docstring""" lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(a ) ).to(a ) if str(a ).startswith('mps' ): lowercase__ = torch.manual_seed(a ) else: lowercase__ = torch.Generator(device=a ).manual_seed(a ) if pil_image: lowercase__ = input_image * 0.5 + 0.5 lowercase__ = input_image.clamp(0 , 1 ) lowercase__ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() lowercase__ = DiffusionPipeline.numpy_to_pil(a )[0] return { "image": input_image, "generator": generator, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "np", } def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]: """simple docstring""" lowercase__ = 'cpu' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**a ) lowercase__ = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowercase__ = self.get_dummy_inputs(a , pil_image=a ) lowercase__ = pipe(**a ) lowercase__ = output.images lowercase__ = self.get_dummy_inputs(a , pil_image=a ) lowercase__ = pipe( **a , return_dict=a , )[0] lowercase__ = image[0, -3:, -3:, -1] lowercase__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__ = np.array( [ 0.9997, 0.0002, 0.9997, 0.9997, 0.9969, 0.0023, 0.9997, 0.9969, 0.9970, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert 
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE_ ( self : str )-> Any: """simple docstring""" lowercase__ = 'cpu' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**a ) lowercase__ = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowercase__ = self.get_dummy_inputs(a , pil_image=a ) lowercase__ = pipe(**a ) lowercase__ = output.images lowercase__ = self.get_dummy_inputs(a , pil_image=a ) lowercase__ = pipe( **a , return_dict=a , )[0] lowercase__ = image[0, -3:, -3:, -1] lowercase__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__ = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Optional[int]: """simple docstring""" lowercase__ = 'cpu' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**a ) lowercase__ = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowercase__ = self.get_dummy_inputs(a , pil_image=a ) lowercase__ = [ pipeline_inputs['image'], pipeline_inputs['image'], ] lowercase__ = pipe(**a ) lowercase__ = output.images lowercase__ = self.get_dummy_inputs(a , pil_image=a ) lowercase__ = [ tuple_pipeline_inputs['image'], tuple_pipeline_inputs['image'], ] lowercase__ = pipe( **a , return_dict=a , )[0] lowercase__ = image[0, -3:, -3:, -1] lowercase__ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (2, 64, 64, 3) lowercase__ = np.array( [ 0.9997, 0.9989, 0.0008, 0.0021, 0.9960, 0.0018, 0.0014, 0.0002, 0.9933, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Any: """simple docstring""" lowercase__ = torch.device('cpu' ) class SCREAMING_SNAKE_CASE : _UpperCamelCase : Tuple = 1 lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**a ) lowercase__ = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowercase__ = torch.Generator(device=a ).manual_seed(0 ) lowercase__ = pipe.decoder.dtype lowercase__ = 1 lowercase__ = ( batch_size, pipe.decoder.config.in_channels, pipe.decoder.config.sample_size, pipe.decoder.config.sample_size, ) lowercase__ = pipe.prepare_latents( a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() ) lowercase__ = ( batch_size, pipe.super_res_first.config.in_channels // 2, pipe.super_res_first.config.sample_size, pipe.super_res_first.config.sample_size, ) lowercase__ = pipe.prepare_latents( a , dtype=a , device=a , generator=a , latents=a , scheduler=DummyScheduler() ) lowercase__ = self.get_dummy_inputs(a , pil_image=a ) lowercase__ = pipe( **a , decoder_latents=a , super_res_latents=a ).images lowercase__ = self.get_dummy_inputs(a , pil_image=a ) # Don't pass image, instead pass embedding lowercase__ = pipeline_inputs.pop('image' ) lowercase__ = pipe.image_encoder(a ).image_embeds lowercase__ = pipe( **a , decoder_latents=a , super_res_latents=a , image_embeddings=a , ).images # make sure passing text embeddings manually is identical assert np.abs(img_out_a - img_out_a ).max() < 1E-4 @skip_mps def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Dict: """simple docstring""" lowercase__ = torch_device == 'cpu' # Check is relaxed because there is not a torch 2.0 sliced attention added kv 
processor lowercase__ = 1E-2 self._test_attention_slicing_forward_pass( test_max_difference=a , expected_max_diff=a ) @skip_mps def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple: """simple docstring""" lowercase__ = torch_device == 'cpu' lowercase__ = True lowercase__ = [ 'decoder_num_inference_steps', 'super_res_num_inference_steps', ] self._test_inference_batch_single_identical( test_max_difference=a , relax_max_difference=a , additional_params_copy_to_batched_inputs=a , ) def SCREAMING_SNAKE_CASE_ ( self : int )-> Union[str, Any]: """simple docstring""" lowercase__ = [ 'decoder_num_inference_steps', 'super_res_num_inference_steps', ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes lowercase__ = [2, 3] self._test_inference_batch_consistent( batch_sizes=a , additional_params_copy_to_batched_inputs=a , ) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=a ) @skip_mps def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> int: """simple docstring""" return super().test_dict_tuple_outputs_equivalent() @skip_mps def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]: """simple docstring""" return super().test_save_load_local() @skip_mps def SCREAMING_SNAKE_CASE_ ( self : str )-> str: """simple docstring""" return super().test_save_load_optional_components() @slow @require_torch_gpu class SCREAMING_SNAKE_CASE (unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self : Any )-> Dict: """simple docstring""" lowercase__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' ) lowercase__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/unclip/karlo_v1_alpha_cat_variation_fp16.npy' ) lowercase__ = UnCLIPImageVariationPipeline.from_pretrained( 'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa ) lowercase__ = pipeline.to(a ) pipeline.set_progress_bar_config(disable=a ) lowercase__ = torch.Generator(device='cpu' ).manual_seed(0 ) lowercase__ = pipeline( a , generator=a , output_type='np' , ) lowercase__ = output.images[0] assert image.shape == (256, 256, 3) assert_mean_pixel_difference(a , a , 15 )
45
from PIL import Image def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Image: def brightness(_SCREAMING_SNAKE_CASE ) -> float: return 128 + level + (c - 128) if not -2_5_5.0 <= level <= 2_5_5.0: raise ValueError('level must be between -255.0 (black) and 255.0 (white)' ) return img.point(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change brightness to 100 lowercase_ = change_brightness(img, 100) brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
45
1
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowercase_ = 16 lowercase_ = 32 def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 16 ) -> Optional[int]: lowercase__ = AutoTokenizer.from_pretrained('bert-base-cased' ) lowercase__ = load_dataset('glue' , 'mrpc' ) def tokenize_function(_SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) lowercase__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase__ = datasets.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase__ = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase__ = 16 elif accelerator.mixed_precision != "no": lowercase__ = 8 else: lowercase__ = None return tokenizer.pad( _SCREAMING_SNAKE_CASE , padding='longest' , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors='pt' , ) # Instantiate dataloaders. 
lowercase__ = DataLoader( tokenized_datasets['train'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) lowercase__ = DataLoader( tokenized_datasets['validation'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowercase_ = mocked_dataloaders # noqa: F811 def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS' , _SCREAMING_SNAKE_CASE ) == "1": lowercase__ = 2 # Initialize accelerator lowercase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ = config['lr'] lowercase__ = int(config['num_epochs'] ) lowercase__ = int(config['seed'] ) lowercase__ = int(config['batch_size'] ) lowercase__ = evaluate.load('glue' , 'mrpc' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=_SCREAMING_SNAKE_CASE ) def inner_training_loop(_SCREAMING_SNAKE_CASE ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(_SCREAMING_SNAKE_CASE ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowercase__ = model.to(accelerator.device ) # Instantiate optimizer lowercase__ = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE ) lowercase__ , lowercase__ = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Instantiate scheduler lowercase__ = get_linear_schedule_with_warmup( optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(_SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) lowercase__ = model(**_SCREAMING_SNAKE_CASE ) lowercase__ = outputs.loss accelerator.backward(_SCREAMING_SNAKE_CASE ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase__ = model(**_SCREAMING_SNAKE_CASE ) lowercase__ = outputs.logits.argmax(dim=-1 ) lowercase__ , lowercase__ = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , ) lowercase__ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , _SCREAMING_SNAKE_CASE ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def __UpperCamelCase () -> Optional[Any]: lowercase__ = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) lowercase__ = parser.parse_args() lowercase__ = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
45
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class SCREAMING_SNAKE_CASE (unittest.TestCase ): def __init__( self : Any , a : str , a : List[Any]=7 , a : int=3 , a : int=18 , a : Optional[Any]=30 , a : Optional[int]=400 , a : int=True , a : Tuple=None , a : Optional[Any]=True , a : str=False , a : str=True , a : int=True , a : Tuple=[0.5, 0.5, 0.5] , a : Any=[0.5, 0.5, 0.5] , )-> Optional[int]: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = do_resize lowercase__ = size if size is not None else {'height': 18, 'width': 20} lowercase__ = do_thumbnail lowercase__ = do_align_axis lowercase__ = do_pad lowercase__ = do_normalize lowercase__ = image_mean lowercase__ = image_std def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : Optional[Any] = DonutImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]: """simple docstring""" lowercase__ = DonutImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self : Any )-> int: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a , 'do_resize' ) ) self.assertTrue(hasattr(a , 'size' ) ) self.assertTrue(hasattr(a , 'do_thumbnail' ) ) self.assertTrue(hasattr(a , 'do_align_long_axis' ) ) self.assertTrue(hasattr(a , 'do_pad' ) ) self.assertTrue(hasattr(a , 'do_normalize' ) ) self.assertTrue(hasattr(a , 'image_mean' ) ) self.assertTrue(hasattr(a , 'image_std' ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict: """simple docstring""" lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict: """simple docstring""" pass @is_flaky() def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a ) for image in image_inputs: self.assertIsInstance(a , 
Image.Image ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a ) for image in image_inputs: self.assertIsInstance(a , np.ndarray ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a ) for image in image_inputs: self.assertIsInstance(a , torch.Tensor ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
45
1
from __future__ import annotations import time import numpy as np lowercase_ = [8, 5, 9, 7] lowercase_ = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] lowercase_ = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class SCREAMING_SNAKE_CASE : def __init__( self : Dict , a : list[int] , a : list[list[int]] , a : list[list[int]] , )-> None: """simple docstring""" lowercase__ = claim_vector lowercase__ = allocated_resources_table lowercase__ = maximum_claim_table def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> list[int]: """simple docstring""" return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> list[int]: """simple docstring""" return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def SCREAMING_SNAKE_CASE_ ( self : str )-> list[list[int]]: """simple docstring""" return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(a ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def SCREAMING_SNAKE_CASE_ ( self : Dict )-> dict[int, list[int]]: """simple docstring""" return {self.__need().index(a ): i for i in self.__need()} def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , **a : List[str] )-> None: """simple docstring""" lowercase__ = self.__need() lowercase__ = self.__allocated_resources_table lowercase__ = self.__available_resources() lowercase__ = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('_' * 50 + '\n' ) while need_list: lowercase__ = False for each_need in need_list: lowercase__ = True for index, need in enumerate(a ): if need > available_resources[index]: lowercase__ = False break if execution: lowercase__ = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: lowercase__ = original_need_index print(f"""Process {process_number + 1} is executing.""" ) # remove the process run from stack need_list.remove(a ) # update available/freed resources stack lowercase__ = np.array(a ) + np.array( alloc_resources_table[process_number] ) print( 'Updated available resource stack for processes: ' + ' '.join([str(a ) for x in available_resources] ) ) break if safe: print('The process is in a safe state.\n' ) else: print('System in unsafe state. Aborting...\n' ) break def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[int]: """simple docstring""" print(' ' * 9 + 'Allocated Resource Table' ) for item in self.__allocated_resources_table: print( f"""P{self.__allocated_resources_table.index(a ) + 1}""" + ' '.join(f"""{it:>8}""" for it in item ) + '\n' ) print(' ' * 9 + 'System Resource Table' ) for item in self.__maximum_claim_table: print( f"""P{self.__maximum_claim_table.index(a ) + 1}""" + ' '.join(f"""{it:>8}""" for it in item ) + '\n' ) print( 'Current Usage by Active Processes: ' + ' '.join(str(a ) for x in self.__claim_vector ) ) print( 'Initial Available Resources: ' + ' '.join(str(a ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
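The code column above stores an identifier-mangled copy of the Banker's algorithm. As a readability aid, here is a minimal reconstruction of its safety check, reusing the row's own claim/allocation/maximum tables; the function name and loop structure below are mine, not part of the dataset row.

import numpy as np

def is_safe_state(claim_vector, allocated, maximum):
    """Return True if every process can finish in some order (Banker's algorithm)."""
    need = np.array(maximum) - np.array(allocated)
    available = np.array(claim_vector) - np.array(allocated).sum(axis=0)
    finished = [False] * len(allocated)
    progressed = True
    while progressed:
        progressed = False
        for i, row in enumerate(need):
            if not finished[i] and all(row <= available):
                available = available + allocated[i]  # process i runs, then frees its resources
                finished[i] = True
                progressed = True
    return all(finished)

# With the tables from the row above this prints True, matching its "safe state" message:
print(is_safe_state(
    [8, 5, 9, 7],
    [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]],
    [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]],
))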
45
import math def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(_SCREAMING_SNAKE_CASE ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError('This should never happen' ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. lowercase_ = """Enter the base and the power separated by a comma: """ lowercase_ , lowercase_ = map(int, input(prompt).split(""",""")) lowercase_ , lowercase_ = map(int, input(prompt).split(""",""")) # We find the log of each number, using the function res(), which takes two # arguments. lowercase_ = res(xa, ya) lowercase_ = res(xa, ya) # We check for the largest number if resa > resa: print("""Largest number is""", xa, """^""", ya) elif resa > resa: print("""Largest number is""", xa, """^""", ya) else: print("""Both are equal""")
45
1
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/config.json""", # See all XGLM models at https://huggingface.co/models?filter=xglm } class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Optional[Any] = 'xglm' _UpperCamelCase : str = ['past_key_values'] _UpperCamelCase : Union[str, Any] = { 'num_attention_heads': 'attention_heads', 'hidden_size': 'd_model', 'num_hidden_layers': 'num_layers', } def __init__( self : Any , a : List[Any]=256_008 , a : List[str]=2_048 , a : List[str]=1_024 , a : int=4_096 , a : int=24 , a : List[str]=16 , a : Dict="gelu" , a : Optional[int]=0.1 , a : Optional[Any]=0.1 , a : Optional[Any]=0.0 , a : int=0.0 , a : Optional[int]=0.02 , a : Union[str, Any]=True , a : List[Any]=True , a : Any=2 , a : Dict=1 , a : str=0 , a : Tuple=2 , **a : Optional[Any] , )-> List[Any]: """simple docstring""" lowercase__ = vocab_size lowercase__ = max_position_embeddings lowercase__ = d_model lowercase__ = ffn_dim lowercase__ = num_layers lowercase__ = attention_heads lowercase__ = activation_function lowercase__ = dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = layerdrop lowercase__ = init_std lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True lowercase__ = use_cache super().__init__( pad_token_id=a , bos_token_id=a , eos_token_id=a , decoder_start_token_id=a , **a , )
45
class SCREAMING_SNAKE_CASE : # Public class to implement a graph def __init__( self : int , a : int , a : int , a : list[list[bool]] )-> None: """simple docstring""" lowercase__ = row lowercase__ = col lowercase__ = graph def SCREAMING_SNAKE_CASE_ ( self : Dict , a : int , a : int , a : list[list[bool]] )-> bool: """simple docstring""" return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : int , a : int , a : list[list[bool]] )-> None: """simple docstring""" lowercase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order lowercase__ = [-1, 0, 1, -1, 1, -1, 0, 1] lowercase__ = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , a ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , a ) def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> int: # And finally, count all islands. """simple docstring""" lowercase__ = [[False for j in range(self.COL )] for i in range(self.ROW )] lowercase__ = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(a , a , a ) count += 1 return count
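The style-context column above is a mangled flood-fill island counter. A de-obfuscated sketch of the same algorithm, assuming 8-connectivity as in the row's neighbour offsets (names below are mine):

def count_islands(grid):
    """Count groups of 1s connected horizontally, vertically, or diagonally."""
    rows, cols = len(grid), len(grid[0])
    visited = [[False] * cols for _ in range(rows)]

    def flood(i, j):
        visited[i][j] = True
        for di in (-1, 0, 1):
            for dj in (-1, 0, 1):
                ni, nj = i + di, j + dj
                if 0 <= ni < rows and 0 <= nj < cols and not visited[ni][nj] and grid[ni][nj]:
                    flood(ni, nj)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not visited[i][j]:
                flood(i, j)
                count += 1
    return count

print(count_islands([[1, 1, 0, 0], [0, 1, 0, 1], [0, 0, 0, 0], [1, 0, 1, 1]]))  # -> 4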
45
1
import math from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""", # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio } class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Any = 'data2vec-audio' def __init__( self : List[str] , a : Optional[Any]=32 , a : Optional[int]=768 , a : Optional[int]=12 , a : List[Any]=12 , a : Optional[int]=3_072 , a : List[Any]="gelu" , a : Optional[Any]=0.1 , a : List[Any]=0.1 , a : Optional[int]=0.1 , a : Dict=0.0 , a : Optional[Any]=0.1 , a : List[Any]=0.1 , a : List[str]=0.02 , a : Any=1E-5 , a : Optional[int]="gelu" , a : Optional[Any]=(512, 512, 512, 512, 512, 512, 512) , a : List[str]=(5, 2, 2, 2, 2, 2, 2) , a : Dict=(10, 3, 3, 3, 3, 2, 2) , a : str=False , a : int=16 , a : Dict=19 , a : int=5 , a : Optional[Any]=0.05 , a : Tuple=10 , a : int=2 , a : Optional[Any]=0.0 , a : Any=10 , a : Any=0 , a : List[Any]="sum" , a : str=False , a : Dict=False , a : Optional[Any]=256 , a : Optional[Any]=(512, 512, 512, 512, 1_500) , a : Tuple=(5, 3, 3, 1, 1) , a : List[Any]=(1, 2, 3, 1, 1) , a : int=512 , a : List[str]=0 , a : Tuple=1 , a : Dict=2 , a : Dict=False , a : Optional[Any]=3 , a : List[str]=2 , a : Union[str, Any]=3 , a : Optional[Any]=None , **a : Optional[Any] , )-> List[Any]: """simple docstring""" super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a ) lowercase__ = hidden_size lowercase__ = feat_extract_activation lowercase__ = list(a ) lowercase__ = list(a ) lowercase__ = list(a ) lowercase__ = conv_bias lowercase__ = num_conv_pos_embeddings lowercase__ = num_conv_pos_embedding_groups lowercase__ = conv_pos_kernel_size lowercase__ = len(self.conv_dim ) lowercase__ = num_hidden_layers lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = num_attention_heads lowercase__ = hidden_dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = feat_proj_dropout lowercase__ = final_dropout lowercase__ = layerdrop lowercase__ = layer_norm_eps lowercase__ = initializer_range lowercase__ = vocab_size lowercase__ = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase__ = mask_time_prob lowercase__ = mask_time_length lowercase__ = mask_time_min_masks lowercase__ = mask_feature_prob lowercase__ = mask_feature_length lowercase__ = mask_feature_min_masks # ctc loss lowercase__ = ctc_loss_reduction lowercase__ = ctc_zero_infinity # adapter lowercase__ = add_adapter lowercase__ = adapter_kernel_size lowercase__ = adapter_stride lowercase__ = num_adapter_layers lowercase__ = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowercase__ = classifier_proj_size # XVector-specific parameters. 
Feel free to ignore for other classes. lowercase__ = list(a ) lowercase__ = list(a ) lowercase__ = list(a ) lowercase__ = xvector_output_dim @property def SCREAMING_SNAKE_CASE_ ( self : Any )-> Union[str, Any]: """simple docstring""" return math.prod(self.conv_stride )
45
from string import ascii_uppercase lowercase_ = {str(ord(c) - 55): c for c in ascii_uppercase} def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('int() can\'t convert non-string with explicit base' ) if num < 0: raise ValueError('parameter must be positive int' ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('\'str\' object cannot be interpreted as an integer' ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('\'float\' object cannot be interpreted as an integer' ) if base in (0, 1): raise ValueError('base must be >= 2' ) if base > 36: raise ValueError('base must be <= 36' ) lowercase__ = '' lowercase__ = 0 lowercase__ = 0 while div != 1: lowercase__ , lowercase__ = divmod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if base >= 11 and 9 < mod < 36: lowercase__ = ALPHABET_VALUES[str(_SCREAMING_SNAKE_CASE )] else: lowercase__ = str(_SCREAMING_SNAKE_CASE ) new_value += actual_value lowercase__ = num // base lowercase__ = div if div == 0: return str(new_value[::-1] ) elif div == 1: new_value += str(_SCREAMING_SNAKE_CASE ) return str(new_value[::-1] ) return new_value[::-1] if __name__ == "__main__": import doctest doctest.testmod() for base in range(2, 37): for num in range(1_000): assert int(decimal_to_any(num, base), base) == num, ( num, base, decimal_to_any(num, base), int(decimal_to_any(num, base), base), )
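The conversion routine above lost its type checks to the identifier mangling (all three isinstance calls collapsed into the same placeholder, so the original argument/type pairs are not recoverable from the row alone). A compact sketch of the core algorithm with simplified validation of my own, not a literal restoration of those checks:

from string import ascii_uppercase

DIGITS = "0123456789" + ascii_uppercase  # digit symbols for bases up to 36

def decimal_to_any(num: int, base: int) -> str:
    """Convert a non-negative int to its representation in `base` (2..36)."""
    if num < 0:
        raise ValueError("parameter must be positive int")
    if not 2 <= base <= 36:
        raise ValueError("base must be between 2 and 36")
    if num == 0:
        return "0"
    digits = []
    while num > 0:
        num, mod = divmod(num, base)
        digits.append(DIGITS[mod])
    return "".join(reversed(digits))

assert decimal_to_any(255, 16) == "FF"
assert int(decimal_to_any(1_000, 7), 7) == 1_000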
45
1
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int: lowercase__ = [False] * len(_SCREAMING_SNAKE_CASE ) lowercase__ = [-1] * len(_SCREAMING_SNAKE_CASE ) def dfs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowercase__ = True lowercase__ = c for u in graph[v]: if not visited[u]: dfs(_SCREAMING_SNAKE_CASE , 1 - c ) for i in range(len(_SCREAMING_SNAKE_CASE ) ): if not visited[i]: dfs(_SCREAMING_SNAKE_CASE , 0 ) for i in range(len(_SCREAMING_SNAKE_CASE ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph lowercase_ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
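A readable sketch of the same DFS 2-colouring check, written with an early exit instead of the row's colour-everything-then-verify pass (the names are mine):

def is_bipartite(graph: dict[int, list[int]]) -> bool:
    """A graph is bipartite iff its vertices can be 2-coloured with no same-colour edge."""
    color = [-1] * len(graph)  # -1 means "not coloured yet"

    def dfs(v: int, c: int) -> bool:
        color[v] = c
        for u in graph[v]:
            if color[u] == -1:
                if not dfs(u, 1 - c):
                    return False
            elif color[u] == c:  # an edge inside one colour class -> odd cycle
                return False
        return True

    return all(color[v] != -1 or dfs(v, 0) for v in range(len(graph)))

print(is_bipartite({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}))  # True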
45
import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE : def __init__( self : List[Any] , a : Any , a : Optional[int]=13 , a : Tuple=30 , a : Union[str, Any]=2 , a : List[str]=3 , a : Dict=True , a : List[str]=True , a : List[Any]=32 , a : List[str]=5 , a : Optional[int]=4 , a : List[str]=37 , a : Dict="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : int=10 , a : List[str]=0.02 , a : int=None , a : List[str]=2 , )-> Dict: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = scope lowercase__ = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase__ = (image_size // patch_size) ** 2 lowercase__ = num_patches + 1 def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any: """simple docstring""" lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]: """simple docstring""" return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[Any] , a : List[str] , a : Dict )-> Optional[Any]: """simple docstring""" lowercase__ = ViTModel(config=a ) model.to(a ) model.eval() lowercase__ = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[Any] , a : int , a : Tuple )-> Union[str, Any]: """simple docstring""" lowercase__ = ViTForMaskedImageModeling(config=a ) model.to(a ) model.eval() lowercase__ = model(a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale 
images lowercase__ = 1 lowercase__ = ViTForMaskedImageModeling(a ) model.to(a ) model.eval() lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ = model(a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[str] , a : int , a : List[Any] )-> str: """simple docstring""" lowercase__ = self.type_sequence_label_size lowercase__ = ViTForImageClassification(a ) model.to(a ) model.eval() lowercase__ = model(a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase__ = 1 lowercase__ = ViTForImageClassification(a ) model.to(a ) model.eval() lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) = config_and_inputs lowercase__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : Any = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) _UpperCamelCase : Union[str, Any] = ( {'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification} if is_torch_available() else {} ) _UpperCamelCase : int = True _UpperCamelCase : int = False _UpperCamelCase : Union[str, Any] = False _UpperCamelCase : Dict = False def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]: """simple docstring""" lowercase__ = ViTModelTester(self ) lowercase__ = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]: """simple docstring""" lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(a ) lowercase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_image_modeling(*a ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a ) @slow def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]: """simple docstring""" for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = ViTModel.from_pretrained(a ) self.assertIsNotNone(a ) def __UpperCamelCase () -> str: lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE (unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]: """simple docstring""" return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]: """simple docstring""" lowercase__ = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(a ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ = model(**a ) # verify the logits lowercase__ = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , a ) lowercase__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) ) @slow def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]: """simple docstring""" lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' ).to(a ) lowercase__ = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 ) lowercase__ = prepare_img() lowercase__ = image_processor(images=a , return_tensors='pt' ) lowercase__ = inputs.pixel_values.to(a ) # forward pass with torch.no_grad(): lowercase__ = model(a , interpolate_pos_encoding=a ) # verify the logits lowercase__ = torch.Size((1, 3_601, 384) ) self.assertEqual(outputs.last_hidden_state.shape , a ) lowercase__ = torch.tensor( [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(a ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) ) @slow @require_accelerate @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self : str )-> str: """simple docstring""" lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=a , return_tensors='pt' ) lowercase__ = inputs.pixel_values.to(a ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowercase__ = model(a )
45
1
import itertools import math def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __UpperCamelCase () -> str: lowercase__ = 2 while True: if is_prime(_SCREAMING_SNAKE_CASE ): yield num num += 1 def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 10001 ) -> int: return next(itertools.islice(prime_generator() , nth - 1 , _SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": print(f'''{solution() = }''')
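The row above is Project Euler 7 (find the 10,001st prime) using a 6k±1 trial-division test. A de-obfuscated sketch whose results are easy to spot-check; `math.isqrt` replaces the float square root of the original:

import itertools
import math

def is_prime(n: int) -> bool:
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3
    if n % 2 == 0 or n % 3 == 0:
        return False
    # every prime > 3 has the form 6k - 1 or 6k + 1
    for i in range(5, math.isqrt(n) + 1, 6):
        if n % i == 0 or n % (i + 2) == 0:
            return False
    return True

def nth_prime(n: int) -> int:
    primes = (k for k in itertools.count(2) if is_prime(k))
    return next(itertools.islice(primes, n - 1, None))

print(nth_prime(6))       # 13
print(nth_prime(10_001))  # 104743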
45
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the slice
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3, then the last 2/3, then the first 2/3 again
        stooge(arr, i, h - t)
        stooge(arr, i + t, h)
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
45
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""", """distilbert-base-uncased-distilled-squad""": ( """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json""" ), """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""", """distilbert-base-cased-distilled-squad""": ( """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json""" ), """distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""", """distilbert-base-multilingual-cased""": ( """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json""" ), """distilbert-base-uncased-finetuned-sst-2-english""": ( """https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json""" ), } class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : int = 'distilbert' _UpperCamelCase : int = { 'hidden_size': 'dim', 'num_attention_heads': 'n_heads', 'num_hidden_layers': 'n_layers', } def __init__( self : Optional[int] , a : Union[str, Any]=30_522 , a : List[Any]=512 , a : Optional[Any]=False , a : Any=6 , a : int=12 , a : int=768 , a : int=4 * 768 , a : List[Any]=0.1 , a : Dict=0.1 , a : List[str]="gelu" , a : Union[str, Any]=0.02 , a : Dict=0.1 , a : Optional[Any]=0.2 , a : Optional[int]=0 , **a : Tuple , )-> Dict: """simple docstring""" lowercase__ = vocab_size lowercase__ = max_position_embeddings lowercase__ = sinusoidal_pos_embds lowercase__ = n_layers lowercase__ = n_heads lowercase__ = dim lowercase__ = hidden_dim lowercase__ = dropout lowercase__ = attention_dropout lowercase__ = activation lowercase__ = initializer_range lowercase__ = qa_dropout lowercase__ = seq_classif_dropout super().__init__(**a , pad_token_id=a ) class SCREAMING_SNAKE_CASE (UpperCAmelCase ): @property def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task == "multiple-choice": lowercase__ = {0: 'batch', 1: 'choice', 2: 'sequence'} else: lowercase__ = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
45
from scipy.stats import spearmanr import datasets lowercase_ = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ lowercase_ = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ lowercase_ = R"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE (datasets.Metric ): def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def SCREAMING_SNAKE_CASE_ ( self : int , a : str , a : Any , a : str=False )-> Optional[int]: """simple docstring""" lowercase__ = spearmanr(a , a ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
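The metric above is a thin wrapper around scipy.stats.spearmanr, and the numbers in its own docstring example can be reproduced directly:

from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 1), round(pvalue, 2))  # -0.7 0.19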
45
1
import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class SCREAMING_SNAKE_CASE (unittest.TestCase , UpperCAmelCase ): def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Any: """simple docstring""" lowercase__ = load_tool('text-to-speech' ) self.tool.setup() def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]: """simple docstring""" torch.manual_seed(0 ) lowercase__ = self.tool('hey' ) lowercase__ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) ) def SCREAMING_SNAKE_CASE_ ( self : int )-> Optional[Any]: """simple docstring""" torch.manual_seed(0 ) lowercase__ = self.tool('hey' ) lowercase__ = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
45
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int: lowercase__ = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(f'''{solution() = }''')
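The row above is Project Euler 114: count the ways to fill a row of 50 units with red blocks of length at least three, any two blocks separated by at least one black unit. A sketch of the same dynamic programme with named indices (the function name is mine):

def count_fill_ways(length: int) -> int:
    ways = [1] * (length + 1)  # the all-black row is always one arrangement
    for row in range(3, length + 1):
        for block in range(3, row + 1):
            # place a block of size `block` at offset `start`; the block plus one
            # separating black unit leaves a smaller row to fill independently
            for start in range(row - block):
                ways[row] += ways[row - start - block - 1]
            ways[row] += 1  # the block sitting flush against the right edge
    return ways[length]

print(count_fill_ways(7))   # 17 (the worked example in the problem statement)
print(count_fill_ways(50))  # 16475640049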
45
1
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
45
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) lowercase_ = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } lowercase_ = { """b0""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 224, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 240, """dropout_rate""": 0.2, """dw_padding""": [16], }, """b2""": { """hidden_dim""": 1_408, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 260, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 16], }, """b3""": { """hidden_dim""": 1_536, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 300, """dropout_rate""": 0.3, """dw_padding""": [5, 18], }, """b4""": { """hidden_dim""": 1_792, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 380, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_048, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 456, """dropout_rate""": 0.4, """dw_padding""": [13, 27], }, """b6""": { """hidden_dim""": 2_304, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 528, """dropout_rate""": 0.5, """dw_padding""": [31], }, """b7""": { """hidden_dim""": 2_560, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 600, """dropout_rate""": 0.5, """dw_padding""": [18], }, } def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str: lowercase__ = EfficientNetConfig() lowercase__ = CONFIG_MAP[model_name]['hidden_dim'] lowercase__ = CONFIG_MAP[model_name]['width_coef'] lowercase__ = CONFIG_MAP[model_name]['depth_coef'] lowercase__ = CONFIG_MAP[model_name]['image_size'] lowercase__ = CONFIG_MAP[model_name]['dropout_rate'] lowercase__ = CONFIG_MAP[model_name]['dw_padding'] lowercase__ = 'huggingface/label-files' lowercase__ = 'imagenet-1k-id2label.json' lowercase__ = 1000 lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase__ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} return config def __UpperCamelCase () -> Tuple: lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ) return im def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]: lowercase__ = CONFIG_MAP[model_name]['image_size'] lowercase__ = EfficientNetImageProcessor( size={'height': size, 'width': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=_SCREAMING_SNAKE_CASE , ) return preprocessor def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple: lowercase__ 
= [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )] lowercase__ = sorted(set(_SCREAMING_SNAKE_CASE ) ) lowercase__ = len(_SCREAMING_SNAKE_CASE ) lowercase__ = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE , range(_SCREAMING_SNAKE_CASE ) )} lowercase__ = [] rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') ) rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') ) rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') ) rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') ) rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') ) for b in block_names: lowercase__ = block_name_mapping[b] rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") ) rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") ) rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") ) rename_keys.append( (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") ) rename_keys.append( (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") ) rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") ) rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") ) rename_keys.append( (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") ) rename_keys.append( (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") ) rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") ) rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") ) rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") ) rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") ) rename_keys.append( (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") ) rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") ) rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") ) rename_keys.append( (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") ) rename_keys.append( (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") ) rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') ) rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') ) rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') ) rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') ) rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') ) lowercase__ = {} for item in rename_keys: if item[0] in original_param_names: lowercase__ = 'efficientnet.' 
+ item[1] lowercase__ = 'classifier.weight' lowercase__ = 'classifier.bias' return key_mapping def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for key, value in tf_params.items(): if "normalization" in key: continue lowercase__ = key_mapping[key] if "_conv" in key and "kernel" in key: lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: lowercase__ = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) ) else: lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE ) @torch.no_grad() def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: lowercase__ = model_classes[model_name]( include_top=_SCREAMING_SNAKE_CASE , weights='imagenet' , input_tensor=_SCREAMING_SNAKE_CASE , input_shape=_SCREAMING_SNAKE_CASE , pooling=_SCREAMING_SNAKE_CASE , classes=1000 , classifier_activation='softmax' , ) lowercase__ = original_model.trainable_variables lowercase__ = original_model.non_trainable_variables lowercase__ = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: lowercase__ = param.numpy() lowercase__ = list(tf_params.keys() ) # Load HuggingFace model lowercase__ = get_efficientnet_config(_SCREAMING_SNAKE_CASE ) lowercase__ = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval() lowercase__ = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print('Converting parameters...' ) lowercase__ = rename_keys(_SCREAMING_SNAKE_CASE ) replace_params(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Initialize preprocessor and preprocess input image lowercase__ = convert_image_processor(_SCREAMING_SNAKE_CASE ) lowercase__ = preprocessor(images=prepare_img() , return_tensors='pt' ) # HF model inference hf_model.eval() with torch.no_grad(): lowercase__ = hf_model(**_SCREAMING_SNAKE_CASE ) lowercase__ = outputs.logits.detach().numpy() # Original model inference lowercase__ = False lowercase__ = CONFIG_MAP[model_name]['image_size'] lowercase__ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) lowercase__ = image.img_to_array(_SCREAMING_SNAKE_CASE ) lowercase__ = np.expand_dims(_SCREAMING_SNAKE_CASE , axis=0 ) lowercase__ = original_model.predict(_SCREAMING_SNAKE_CASE ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same." print('Model outputs match!' 
) if save_model: # Create folder to save model if not os.path.isdir(_SCREAMING_SNAKE_CASE ): os.mkdir(_SCREAMING_SNAKE_CASE ) # Save converted model and image processor hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE ) if push_to_hub: # Push model and image processor to hub print(F"""Pushing converted {model_name} to the hub...""" ) lowercase__ = F"""efficientnet-{model_name}""" preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE ) hf_model.push_to_hub(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") lowercase_ = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
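One detail worth calling out in the conversion above: TensorFlow stores conv kernels channels-last while PyTorch is channels-first, hence the two different permutations in replace_params. A small self-contained check of those layouts (the shapes here are illustrative, not taken from EfficientNet):

import numpy as np
import torch

# Regular conv kernels: TF (H, W, in, out) -> PyTorch (out, in, H, W)
tf_kernel = np.random.rand(3, 3, 32, 64).astype(np.float32)
assert torch.from_numpy(tf_kernel).permute(3, 2, 0, 1).shape == (64, 32, 3, 3)

# Depthwise kernels: TF (H, W, channels, multiplier) -> PyTorch (channels * mult, 1, H, W),
# which is why the script uses permute(2, 3, 0, 1) for keys containing "depthwise_kernel"
tf_dw = np.random.rand(3, 3, 32, 1).astype(np.float32)
assert torch.from_numpy(tf_dw).permute(2, 3, 0, 1).shape == (32, 1, 3, 3)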
45
1
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class SCREAMING_SNAKE_CASE (UpperCAmelCase ): def __init__( self : Optional[Any] , a : UNetaDModel , a : UNetaDModel , a : DDPMScheduler , a : Any , )-> Dict: """simple docstring""" super().__init__() lowercase__ = value_function lowercase__ = unet lowercase__ = scheduler lowercase__ = env lowercase__ = env.get_dataset() lowercase__ = {} for key in self.data.keys(): try: lowercase__ = self.data[key].mean() except: # noqa: E722 pass lowercase__ = {} for key in self.data.keys(): try: lowercase__ = self.data[key].std() except: # noqa: E722 pass lowercase__ = env.observation_space.shape[0] lowercase__ = env.action_space.shape[0] def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Any , a : int )-> Dict: """simple docstring""" return (x_in - self.means[key]) / self.stds[key] def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : List[str] )-> str: """simple docstring""" return x_in * self.stds[key] + self.means[key] def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Tuple )-> Tuple: """simple docstring""" if type(a ) is dict: return {k: self.to_torch(a ) for k, v in x_in.items()} elif torch.is_tensor(a ): return x_in.to(self.unet.device ) return torch.tensor(a , device=self.unet.device ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Optional[int] , a : Dict , a : Optional[Any] )-> List[Any]: """simple docstring""" for key, val in cond.items(): lowercase__ = val.clone() return x_in def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[Any] , a : Any , a : Optional[Any] , a : Optional[int] )-> List[Any]: """simple docstring""" lowercase__ = x.shape[0] lowercase__ = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model lowercase__ = torch.full((batch_size,) , a , device=self.unet.device , dtype=torch.long ) for _ in range(a ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models lowercase__ = self.value_function(x.permute(0 , 2 , 1 ) , a ).sample lowercase__ = torch.autograd.grad([y.sum()] , [x] )[0] lowercase__ = self.scheduler._get_variance(a ) lowercase__ = torch.exp(0.5 * posterior_variance ) lowercase__ = model_std * grad lowercase__ = 0 lowercase__ = x.detach() lowercase__ = x + scale * grad lowercase__ = self.reset_xa(a , a , self.action_dim ) lowercase__ = self.unet(x.permute(0 , 2 , 1 ) , a ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg lowercase__ = self.scheduler.step(a , a , a , predict_epsilon=a )['prev_sample'] # apply conditions to the trajectory (set the initial state) lowercase__ = self.reset_xa(a , a , self.action_dim ) lowercase__ = self.to_torch(a ) return x, y def __call__( self : Any , a : Tuple , a : int=64 , a : Tuple=32 , a : List[Any]=2 , a : List[str]=0.1 )-> List[Any]: """simple docstring""" lowercase__ = self.normalize(a , 'observations' ) lowercase__ = obs[None].repeat(a , axis=0 ) lowercase__ = {0: self.to_torch(a )} lowercase__ = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) lowercase__ = randn_tensor(a , device=self.unet.device ) lowercase__ = self.reset_xa(a , a , self.action_dim ) lowercase__ = self.to_torch(a ) # run the diffusion process lowercase__ , lowercase__ = self.run_diffusion(a , a , a , a ) 
# sort output trajectories by value lowercase__ = y.argsort(0 , descending=a ).squeeze() lowercase__ = x[sorted_idx] lowercase__ = sorted_values[:, :, : self.action_dim] lowercase__ = actions.detach().cpu().numpy() lowercase__ = self.de_normalize(a , key='actions' ) # select the action with the highest value if y is not None: lowercase__ = 0 else: # if we didn't run value guiding, select a random action lowercase__ = np.random.randint(0 , a ) lowercase__ = denorm_actions[selected_index, 0] return denorm_actions
45
import argparse import json import subprocess def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: lowercase__ = [] lowercase__ = ( F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\"""" ' https://api.github.com/repos/huggingface/transformers/actions/runners' ) lowercase__ = subprocess.run(_SCREAMING_SNAKE_CASE , shell=_SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE ) lowercase__ = output.stdout.decode('utf-8' ) lowercase__ = json.loads(_SCREAMING_SNAKE_CASE ) lowercase__ = status['runners'] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(_SCREAMING_SNAKE_CASE ) # save the result so we can report them on Slack with open('offline_runners.txt' , 'w' ) as fp: fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) ) if len(_SCREAMING_SNAKE_CASE ) > 0: lowercase__ = '\n'.join([x['name'] for x in offline_runners] ) raise ValueError(F"""The following runners are offline:\n{failed}""" ) if __name__ == "__main__": def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str: return values.split(',' ) lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--target_runners""", default=None, type=list_str, required=True, help="""Comma-separated list of runners to check status.""", ) parser.add_argument( """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission.""" ) lowercase_ = parser.parse_args() get_runner_status(args.target_runners, args.token)
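The script above shells out to curl; the same check can be written with requests (the endpoint, headers, and JSON fields below are taken from the script itself):

import requests

def offline_runners(token: str, target_runners: list[str]) -> list[str]:
    """Names of the targeted self-hosted runners that the GitHub API reports offline."""
    resp = requests.get(
        "https://api.github.com/repos/huggingface/transformers/actions/runners",
        headers={"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"},
        timeout=30,
    )
    resp.raise_for_status()
    return [
        runner["name"]
        for runner in resp.json()["runners"]
        if runner["name"] in target_runners and runner["status"] == "offline"
    ]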
45
1
import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase_ = """▁""" lowercase_ = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : str = BigBirdTokenizer _UpperCamelCase : Optional[Any] = BigBirdTokenizerFast _UpperCamelCase : List[str] = True _UpperCamelCase : Tuple = True def SCREAMING_SNAKE_CASE_ ( self : Dict )-> int: """simple docstring""" super().setUp() lowercase__ = self.tokenizer_class(a , keep_accents=a ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Dict: """simple docstring""" lowercase__ = '<s>' lowercase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a ) def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]: """simple docstring""" lowercase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , '[MASK]' ) self.assertEqual(len(a ) , 1_004 ) def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Union[str, Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_000 ) def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Optional[Any]: """simple docstring""" if not self.test_rust_tokenizer: return lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer() lowercase__ = 'I was born in 92000, and this is falsé.' lowercase__ = tokenizer.tokenize(a ) lowercase__ = rust_tokenizer.tokenize(a ) self.assertListEqual(a , a ) lowercase__ = tokenizer.encode(a , add_special_tokens=a ) lowercase__ = rust_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) lowercase__ = self.get_rust_tokenizer() lowercase__ = tokenizer.encode(a ) lowercase__ = rust_tokenizer.encode(a ) self.assertListEqual(a , a ) def SCREAMING_SNAKE_CASE_ ( self : str )-> str: """simple docstring""" lowercase__ = BigBirdTokenizer(a , keep_accents=a ) lowercase__ = tokenizer.tokenize('This is a test' ) self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a ) , [285, 46, 10, 170, 382] , ) lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( a , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowercase__ = tokenizer.convert_tokens_to_ids(a ) self.assertListEqual( a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowercase__ = tokenizer.convert_ids_to_tokens(a ) self.assertListEqual( a , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any: """simple docstring""" return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' ) @slow def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Tuple: """simple docstring""" lowercase__ = 'Hello World!' lowercase__ = [65, 18_536, 2_260, 101, 66] self.assertListEqual(a , self.big_tokenizer.encode(a ) ) @slow def SCREAMING_SNAKE_CASE_ ( self : str )-> List[Any]: """simple docstring""" lowercase__ = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) # fmt: off lowercase__ = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231 # fmt: on self.assertListEqual(a , self.big_tokenizer.encode(a ) ) @require_torch @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Dict: """simple docstring""" import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence lowercase__ = list(self.big_tokenizer.get_vocab().keys() )[:10] lowercase__ = ' '.join(a ) lowercase__ = self.big_tokenizer.encode_plus(a , return_tensors='pt' , return_token_type_ids=a ) lowercase__ = self.big_tokenizer.batch_encode_plus( [sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=a ) lowercase__ = BigBirdConfig(attention_type='original_full' ) lowercase__ = BigBirdModel(a ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**a ) model(**a ) @slow def SCREAMING_SNAKE_CASE_ ( self : int )-> Dict: """simple docstring""" lowercase__ = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' ) lowercase__ = tokenizer.decode(tokenizer('Paris is the [MASK].' 
).input_ids ) self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]' ) @slow def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]: """simple docstring""" lowercase__ = {'input_ids': [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=a , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
45
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Tuple = 'ClapFeatureExtractor' _UpperCamelCase : Union[str, Any] = ('RobertaTokenizer', 'RobertaTokenizerFast') def __init__( self : List[Any] , a : int , a : str )-> Any: """simple docstring""" super().__init__(a , a ) def __call__( self : Any , a : Tuple=None , a : Optional[int]=None , a : int=None , **a : Optional[int] )-> Union[str, Any]: """simple docstring""" lowercase__ = kwargs.pop('sampling_rate' , a ) if text is None and audios is None: raise ValueError('You have to specify either text or audios. Both cannot be none.' ) if text is not None: lowercase__ = self.tokenizer(a , return_tensors=a , **a ) if audios is not None: lowercase__ = self.feature_extractor( a , sampling_rate=a , return_tensors=a , **a ) if text is not None and audios is not None: lowercase__ = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a ) , tensor_type=a ) def SCREAMING_SNAKE_CASE_ ( self : str , *a : Dict , **a : int )-> Optional[int]: """simple docstring""" return self.tokenizer.batch_decode(*a , **a ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , *a : int , **a : Dict )-> Dict: """simple docstring""" return self.tokenizer.decode(*a , **a ) @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]: """simple docstring""" lowercase__ = self.tokenizer.model_input_names lowercase__ = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
45
1
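# A minimal standalone sketch of the ordered-deduplication idiom the processor's
# `model_input_names` property above relies on: `dict.fromkeys` keeps the first
# occurrence of each key and preserves insertion order, so merging the tokenizer
# and feature-extractor input names yields a stable, duplicate-free list. The
# concrete names below are assumed for illustration only.

tokenizer_input_names = ["input_ids", "attention_mask"]
feature_extractor_input_names = ["input_features", "attention_mask"]

merged = list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
assert merged == ["input_ids", "attention_mask", "input_features"]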
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class SCREAMING_SNAKE_CASE (unittest.TestCase ): def __init__( self : List[Any] , a : Tuple , a : Tuple=7 , a : List[str]=3 , a : Dict=18 , a : int=30 , a : List[Any]=400 , a : Any=True , a : Any=32 , a : str=True , )-> int: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = do_resize lowercase__ = size_divisor lowercase__ = do_rescale def SCREAMING_SNAKE_CASE_ ( self : Dict )-> int: """simple docstring""" return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : Optional[int] = GLPNImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]: """simple docstring""" lowercase__ = GLPNImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self : int )-> Dict: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a , 'do_resize' ) ) self.assertTrue(hasattr(a , 'size_divisor' ) ) self.assertTrue(hasattr(a , 'resample' ) ) self.assertTrue(hasattr(a , 'do_rescale' ) ) def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[int]: """simple docstring""" pass def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a ) for image in image_inputs: self.assertIsInstance(a , Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> str: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a ) for image in image_inputs: self.assertIsInstance(a , np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=a , torchify=a ) for image in image_inputs: self.assertIsInstance(a , torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
45
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: lowercase_ = None lowercase_ = logging.get_logger(__name__) lowercase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} lowercase_ = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json""" ), }, } lowercase_ = { """moussaKam/mbarthez""": 1_024, """moussaKam/barthez""": 1_024, """moussaKam/barthez-orangesum-title""": 1_024, } lowercase_ = """▁""" class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Dict = VOCAB_FILES_NAMES _UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP _UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCamelCase : Optional[Any] = ['input_ids', 'attention_mask'] _UpperCamelCase : int = BarthezTokenizer def __init__( self : List[Any] , a : Union[str, Any]=None , a : Optional[Any]=None , a : Dict="<s>" , a : Union[str, Any]="</s>" , a : List[str]="</s>" , a : Optional[Any]="<s>" , a : int="<unk>" , a : str="<pad>" , a : Optional[int]="<mask>" , **a : Union[str, Any] , )-> Tuple: """simple docstring""" lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token super().__init__( a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , ) lowercase__ = vocab_file lowercase__ = False if not self.vocab_file else True def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase__ = [self.cls_token_id] lowercase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : Optional[str] = None )-> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' 
) if not os.path.isdir(a ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ = os.path.join( a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a ): copyfile(self.vocab_file , a ) return (out_vocab_file,)
45
1
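# A hedged sketch of the invariant the GLPN image-processing tests above assert:
# output height and width must be divisible by `size_divisor`. One simple way to
# satisfy it is to round each dimension down to the nearest multiple, as below.
# This illustrates the invariant only; it is not the library's actual resize code.

def round_down_to_multiple(value: int, divisor: int) -> int:
    # Integer floor division followed by multiplication snaps to a lower multiple.
    return (value // divisor) * divisor

height, width, size_divisor = 333, 500, 32
new_height = round_down_to_multiple(height, size_divisor)
new_width = round_down_to_multiple(width, size_divisor)
assert new_height % size_divisor == 0 and new_width % size_divisor == 0
assert (new_height, new_width) == (320, 480)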
import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowercase_ = 16 lowercase_ = 32 def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]: return int(x / 2**20 ) class SCREAMING_SNAKE_CASE : def __enter__( self : Optional[int] )-> List[str]: """simple docstring""" gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero lowercase__ = torch.cuda.memory_allocated() return self def __exit__( self : Any , *a : int )-> Optional[int]: """simple docstring""" gc.collect() torch.cuda.empty_cache() lowercase__ = torch.cuda.memory_allocated() lowercase__ = torch.cuda.max_memory_allocated() lowercase__ = bamb(self.end - self.begin ) lowercase__ = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = "bert-base-cased" , _SCREAMING_SNAKE_CASE = 320 , _SCREAMING_SNAKE_CASE = 160 , ) -> Dict: lowercase__ = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) lowercase__ = load_dataset( 'glue' , 'mrpc' , split={'train': F"""train[:{n_train}]""", 'validation': F"""validation[:{n_val}]"""} ) def tokenize_function(_SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) lowercase__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase__ = datasets.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_SCREAMING_SNAKE_CASE ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
lowercase__ = DataLoader( tokenized_datasets['train'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) lowercase__ = DataLoader( tokenized_datasets['validation'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]: # Initialize accelerator lowercase__ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ = config['lr'] lowercase__ = int(config['num_epochs'] ) lowercase__ = int(config['seed'] ) lowercase__ = int(config['batch_size'] ) lowercase__ = args.model_name_or_path set_seed(_SCREAMING_SNAKE_CASE ) lowercase__ , lowercase__ = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE ) # Instantiate optimizer lowercase__ = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowercase__ = optimizer_cls(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE ) if accelerator.state.deepspeed_plugin is not None: lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: lowercase__ = 1 lowercase__ = (len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowercase__ = get_linear_schedule_with_warmup( optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=_SCREAMING_SNAKE_CASE , ) else: lowercase__ = DummyScheduler(_SCREAMING_SNAKE_CASE , total_num_steps=_SCREAMING_SNAKE_CASE , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # We need to keep track of how many total steps we have iterated over lowercase__ = 0 # We also need to keep track of the stating epoch so files are named properly lowercase__ = 0 # Now we train the model lowercase__ = {} for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): lowercase__ = model(**_SCREAMING_SNAKE_CASE ) lowercase__ = outputs.loss lowercase__ = loss / gradient_accumulation_steps accelerator.backward(_SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) ) accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) ) accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) ) accelerator.print( 'Total Peak Memory consumed during the train (max): {}'.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) lowercase__ = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f: json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __UpperCamelCase () -> int: lowercase__ = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=_SCREAMING_SNAKE_CASE , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_SCREAMING_SNAKE_CASE , ) parser.add_argument( '--output_dir' , type=_SCREAMING_SNAKE_CASE , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--peak_memory_upper_bound' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , ) parser.add_argument( '--n_train' , type=_SCREAMING_SNAKE_CASE , default=320 , help='Number of training examples to use.' , ) parser.add_argument( '--n_val' , type=_SCREAMING_SNAKE_CASE , default=160 , help='Number of validation examples to use.' , ) parser.add_argument( '--num_epochs' , type=_SCREAMING_SNAKE_CASE , default=1 , help='Number of train epochs.' , ) lowercase__ = parser.parse_args() lowercase__ = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
45
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : List[Any] = StableDiffusionSAGPipeline _UpperCamelCase : str = TEXT_TO_IMAGE_PARAMS _UpperCamelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS _UpperCamelCase : Union[str, Any] = False def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict: """simple docstring""" torch.manual_seed(0 ) lowercase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) lowercase__ = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , ) torch.manual_seed(0 ) lowercase__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) lowercase__ = CLIPTextModel(a ) lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowercase__ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[int] , a : Any=0 )-> Union[str, Any]: """simple docstring""" if str(a ).startswith('mps' ): lowercase__ = torch.manual_seed(a ) else: lowercase__ = torch.Generator(device=a ).manual_seed(a ) lowercase__ = { 'prompt': '.', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 1.0, 'sag_scale': 1.0, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class SCREAMING_SNAKE_CASE (unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self : str )-> int: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]: """simple docstring""" lowercase__ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ) lowercase__ = sag_pipe.to(a ) sag_pipe.set_progress_bar_config(disable=a ) lowercase__ = '.' 
lowercase__ = torch.manual_seed(0 ) lowercase__ = sag_pipe( [prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' ) lowercase__ = output.images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]: """simple docstring""" lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) lowercase__ = sag_pipe.to(a ) sag_pipe.set_progress_bar_config(disable=a ) lowercase__ = '.' lowercase__ = torch.manual_seed(0 ) lowercase__ = sag_pipe( [prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' ) lowercase__ = output.images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) lowercase__ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2 def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]: """simple docstring""" lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' ) lowercase__ = sag_pipe.to(a ) sag_pipe.set_progress_bar_config(disable=a ) lowercase__ = '.' lowercase__ = torch.manual_seed(0 ) lowercase__ = sag_pipe( [prompt] , width=768 , height=512 , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , ) lowercase__ = output.images assert image.shape == (1, 512, 768, 3)
45
1
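# A CPU-only analogue of the CUDA memory tracker used in the training script
# above, sketched with the standard-library `tracemalloc` module so it runs
# without a GPU. It reports the delta and peak Python-heap usage inside the
# `with` block in MiB. This is an illustration of the pattern under simplified
# assumptions, not a drop-in replacement for the CUDA-based measurements.

import tracemalloc

class PeakMemoryTracker:
    def __enter__(self):
        tracemalloc.start()
        self.begin, _ = tracemalloc.get_traced_memory()  # bytes currently traced
        return self

    def __exit__(self, *exc):
        current, peak = tracemalloc.get_traced_memory()
        tracemalloc.stop()
        # Convert bytes to MiB, mirroring the byte-to-megabyte helper above.
        self.used = (current - self.begin) / 2**20
        self.peaked = (peak - self.begin) / 2**20

with PeakMemoryTracker() as tracker:
    buffer = [0] * 1_000_000  # allocate roughly 8 MiB of list slots to measure
print(f"delta used/peak {tracker.used:.1f}/{tracker.peaked:.1f} MiB")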
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") @dataclass class SCREAMING_SNAKE_CASE : _UpperCamelCase : Optional[str] = field( default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} ) _UpperCamelCase : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) _UpperCamelCase : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'The column name of the images in the files.'} ) _UpperCamelCase : Optional[str] = field(default=UpperCAmelCase , metadata={'help': 'A folder containing the training data.'} ) _UpperCamelCase : Optional[str] = field(default=UpperCAmelCase , metadata={'help': 'A folder containing the validation data.'} ) _UpperCamelCase : Optional[float] = field( default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} ) _UpperCamelCase : Optional[int] = field( default=UpperCAmelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) _UpperCamelCase : Optional[int] = field( default=UpperCAmelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]: """simple docstring""" lowercase__ = {} if self.train_dir is not None: lowercase__ = self.train_dir if self.validation_dir is not None: lowercase__ = self.validation_dir lowercase__ = data_files if data_files else None @dataclass class SCREAMING_SNAKE_CASE : _UpperCamelCase : str = field( default=UpperCAmelCase , metadata={ 'help': ( 'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.' ) } , ) _UpperCamelCase : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} ) _UpperCamelCase : Optional[str] = field( default=UpperCAmelCase , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. 
Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) _UpperCamelCase : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} ) _UpperCamelCase : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) _UpperCamelCase : str = field(default=UpperCAmelCase , metadata={'help': 'Name or path of preprocessor config.'} ) _UpperCamelCase : bool = field( default=UpperCAmelCase , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) _UpperCamelCase : float = field( default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} ) _UpperCamelCase : bool = field( default=UpperCAmelCase , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} ) @dataclass class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : float = field( default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int: lowercase__ = torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def __UpperCamelCase () -> List[str]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowercase__ = training_args.get_process_log_level() logger.setLevel(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. 
lowercase__ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase__ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. lowercase__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. lowercase__ = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0: lowercase__ = ds['train'].train_test_split(data_args.train_val_split ) lowercase__ = split['train'] lowercase__ = split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase__ = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: lowercase__ = ViTMAEConfig.from_pretrained(model_args.config_name , **_SCREAMING_SNAKE_CASE ) elif model_args.model_name_or_path: lowercase__ = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE ) else: lowercase__ = ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' 
) if model_args.config_overrides is not None: logger.info(F"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(F"""New config: {config}""" ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: lowercase__ = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_SCREAMING_SNAKE_CASE ) elif model_args.model_name_or_path: lowercase__ = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE ) else: lowercase__ = ViTImageProcessor() # create model if model_args.model_name_or_path: lowercase__ = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) lowercase__ = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE ) if training_args.do_train: lowercase__ = ds['train'].column_names else: lowercase__ = ds['validation'].column_names if data_args.image_column_name is not None: lowercase__ = data_args.image_column_name elif "image" in column_names: lowercase__ = 'image' elif "img" in column_names: lowercase__ = 'img' else: lowercase__ = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: lowercase__ = image_processor.size['shortest_edge'] else: lowercase__ = (image_processor.size['height'], image_processor.size['width']) lowercase__ = Compose( [ Lambda(lambda _SCREAMING_SNAKE_CASE : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(_SCREAMING_SNAKE_CASE , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(_SCREAMING_SNAKE_CASE ): lowercase__ = [transforms(_SCREAMING_SNAKE_CASE ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: lowercase__ = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_SCREAMING_SNAKE_CASE ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: lowercase__ = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_SCREAMING_SNAKE_CASE ) # Compute absolute learning rate lowercase__ = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: lowercase__ = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer lowercase__ = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_SCREAMING_SNAKE_CASE , 
data_collator=_SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: lowercase__ = None if training_args.resume_from_checkpoint is not None: lowercase__ = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase__ = last_checkpoint lowercase__ = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowercase__ = trainer.evaluate() trainer.log_metrics('eval' , _SCREAMING_SNAKE_CASE ) trainer.save_metrics('eval' , _SCREAMING_SNAKE_CASE ) # Write model card and (optionally) push to hub lowercase__ = { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**_SCREAMING_SNAKE_CASE ) else: trainer.create_model_card(**_SCREAMING_SNAKE_CASE ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
45
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/deit-base-distilled-patch16-224""": ( """https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json""" ), # See all DeiT models at https://huggingface.co/models?filter=deit } class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Any = 'deit' def __init__( self : Any , a : Union[str, Any]=768 , a : Optional[Any]=12 , a : Union[str, Any]=12 , a : Optional[int]=3_072 , a : Optional[int]="gelu" , a : Optional[Any]=0.0 , a : List[Any]=0.0 , a : int=0.02 , a : List[str]=1E-1_2 , a : Optional[int]=224 , a : Tuple=16 , a : List[Any]=3 , a : List[str]=True , a : Any=16 , **a : Union[str, Any] , )-> int: """simple docstring""" super().__init__(**a ) lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = qkv_bias lowercase__ = encoder_stride class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : List[Any] = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE_ ( self : int )-> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def SCREAMING_SNAKE_CASE_ ( self : Any )-> float: """simple docstring""" return 1E-4
45
1
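# The learning-rate arithmetic buried in the MAE script above is worth isolating:
# the linear scaling rule computes absolute_lr = base_lr * total_batch_size / 256,
# where 256 is the reference batch size of the original MAE recipe. The values in
# the example call are assumed for illustration.

def absolute_learning_rate(base_lr: float, per_device_batch_size: int,
                           gradient_accumulation_steps: int, world_size: int) -> float:
    total_batch_size = per_device_batch_size * gradient_accumulation_steps * world_size
    return base_lr * total_batch_size / 256

# With an effective batch of 8 * 4 * 8 = 256, the base rate passes through unchanged.
assert absolute_learning_rate(1e-3, 8, 4, 8) == 1e-3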
from __future__ import annotations class SCREAMING_SNAKE_CASE : def __init__( self : List[Any] , a : int = 0 )-> Any: """simple docstring""" lowercase__ = key def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : str , a : int )-> list[str]: """simple docstring""" assert isinstance(a , a ) and isinstance(a , a ) lowercase__ = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(a ) ^ key ) for ch in content] def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : str , a : int )-> list[str]: """simple docstring""" assert isinstance(a , a ) and isinstance(a , a ) lowercase__ = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(a ) ^ key ) for ch in content] def SCREAMING_SNAKE_CASE_ ( self : str , a : str , a : int = 0 )-> str: """simple docstring""" assert isinstance(a , a ) and isinstance(a , a ) lowercase__ = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned lowercase__ = '' for ch in content: ans += chr(ord(a ) ^ key ) return ans def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : int = 0 )-> str: """simple docstring""" assert isinstance(a , a ) and isinstance(a , a ) lowercase__ = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned lowercase__ = '' for ch in content: ans += chr(ord(a ) ^ key ) return ans def SCREAMING_SNAKE_CASE_ ( self : str , a : str , a : int = 0 )-> bool: """simple docstring""" assert isinstance(a , a ) and isinstance(a , a ) try: with open(a ) as fin, open('encrypt.out' , 'w+' ) as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(a , a ) ) except OSError: return False return True def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : str , a : int )-> bool: """simple docstring""" assert isinstance(a , a ) and isinstance(a , a ) try: with open(a ) as fin, open('decrypt.out' , 'w+' ) as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(a , a ) ) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
45
import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 ) -> List[Any]: lowercase__ = None if token is not None: lowercase__ = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""} # The id of a workflow (not of a workflow run) lowercase__ = '636036' lowercase__ = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs""" # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}""" lowercase__ = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json() return result["workflow_runs"] def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: lowercase__ = get_daily_ci_runs(_SCREAMING_SNAKE_CASE ) lowercase__ = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": lowercase__ = workflow_run['id'] break return workflow_run_id def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: lowercase__ = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE ) if workflow_run_id is not None: lowercase__ = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE ) for artifact_name in artifact_names: if artifact_name in artifacts_links: lowercase__ = artifacts_links[artifact_name] download_artifact( artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowercase__ = {} for artifact_name in artifact_names: lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" ) if os.path.isfile(_SCREAMING_SNAKE_CASE ): lowercase__ = {} with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z: for filename in z.namelist(): if not os.path.isdir(_SCREAMING_SNAKE_CASE ): # read the file with z.open(_SCREAMING_SNAKE_CASE ) as f: lowercase__ = f.read().decode('UTF-8' ) return results
45
1
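# A minimal sketch of the property the XOR cipher class above depends on: applying
# the same single-byte key twice restores the original text, so one function serves
# as both encrypt and decrypt. Key handling is simplified here (plain modulo 255,
# matching the class); this is an illustration, not the class itself.

def xor_cipher(content: str, key: int) -> str:
    key %= 255  # keep the key in the single-byte range the class above enforces
    return "".join(chr(ord(ch) ^ key) for ch in content)

message = "hallo welt"
key = 67
assert xor_cipher(xor_cipher(message, key), key) == message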
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process lowercase_ = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE : _UpperCamelCase : str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _UpperCamelCase : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _UpperCamelCase : Optional[str] = field( default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} ) _UpperCamelCase : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _UpperCamelCase : bool = field(default=UpperCAmelCase , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _UpperCamelCase : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class SCREAMING_SNAKE_CASE : _UpperCamelCase : str = field( metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} ) _UpperCamelCase : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , ) _UpperCamelCase : int = field( default=1_28 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _UpperCamelCase : bool = field( default=UpperCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def __UpperCamelCase () -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' 
) lowercase__ = import_module('tasks' ) try: lowercase__ = getattr(_SCREAMING_SNAKE_CASE , model_args.task_type ) lowercase__ = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """ F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task lowercase__ = token_classification_task.get_labels(data_args.labels ) lowercase__ = dict(enumerate(_SCREAMING_SNAKE_CASE ) ) lowercase__ = len(_SCREAMING_SNAKE_CASE ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid={label: i for i, label in enumerate(_SCREAMING_SNAKE_CASE )} , cache_dir=model_args.cache_dir , ) lowercase__ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) lowercase__ = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , ) # Get datasets lowercase__ = ( TokenClassificationDataset( token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) lowercase__ = ( TokenClassificationDataset( token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple[List[int], List[int]]: lowercase__ = np.argmax(_SCREAMING_SNAKE_CASE , axis=2 ) lowercase__ , lowercase__ = preds.shape lowercase__ = [[] for _ in range(_SCREAMING_SNAKE_CASE )] lowercase__ = [[] for _ in range(_SCREAMING_SNAKE_CASE )] for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: 
out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(_SCREAMING_SNAKE_CASE ) -> Dict: lowercase__ , lowercase__ = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "precision": precision_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "recall": recall_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "f1": fa_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), } # Data collator lowercase__ = DataCollatorWithPadding(_SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer lowercase__ = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowercase__ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) lowercase__ = trainer.evaluate() lowercase__ = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) writer.write('%s = %s\n' % (key, value) ) results.update(_SCREAMING_SNAKE_CASE ) # Predict if training_args.do_predict: lowercase__ = TokenClassificationDataset( token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) lowercase__ , lowercase__ , lowercase__ = trainer.predict(_SCREAMING_SNAKE_CASE ) lowercase__ , lowercase__ = align_predictions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowercase__ = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) writer.write('%s = %s\n' % (key, value) ) # Save predictions lowercase__ = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return results def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
45
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device lowercase_ = False class SCREAMING_SNAKE_CASE (unittest.TestCase ): pass @nightly @require_torch_gpu class SCREAMING_SNAKE_CASE (unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]: """simple docstring""" lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowercase__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) lowercase__ = torch.manual_seed(0 ) lowercase__ = pipe.dual_guided( prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(a ) lowercase__ = VersatileDiffusionPipeline.from_pretrained(a , torch_dtype=torch.floataa ) pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowercase__ = generator.manual_seed(0 ) lowercase__ = pipe.dual_guided( prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]: """simple docstring""" lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowercase__ = 'cyberpunk 2077' lowercase__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) lowercase__ = torch.manual_seed(0 ) lowercase__ = pipe.dual_guided( prompt=a , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images lowercase__ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowercase__ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 lowercase__ = 'A painting of a squirrel eating a burger ' lowercase__ = torch.manual_seed(0 ) lowercase__ = pipe.text_to_image( prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images lowercase__ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowercase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 lowercase__ = pipe.image_variation(a , generator=a , output_type='numpy' ).images lowercase__ = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) lowercase__ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
45
1
from __future__ import annotations def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) -> tuple[int, float, str]: lowercase__ = cipher_alphabet or [chr(i ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) lowercase__ = { 'a': 0.0_8_4_9_7, 'b': 0.0_1_4_9_2, 'c': 0.0_2_2_0_2, 'd': 0.0_4_2_5_3, 'e': 0.1_1_1_6_2, 'f': 0.0_2_2_2_8, 'g': 0.0_2_0_1_5, 'h': 0.0_6_0_9_4, 'i': 0.0_7_5_4_6, 'j': 0.0_0_1_5_3, 'k': 0.0_1_2_9_2, 'l': 0.0_4_0_2_5, 'm': 0.0_2_4_0_6, 'n': 0.0_6_7_4_9, 'o': 0.0_7_5_0_7, 'p': 0.0_1_9_2_9, 'q': 0.0_0_0_9_5, 'r': 0.0_7_5_8_7, 's': 0.0_6_3_2_7, 't': 0.0_9_3_5_6, 'u': 0.0_2_7_5_8, 'v': 0.0_0_9_7_8, 'w': 0.0_2_5_6_0, 'x': 0.0_0_1_5_0, 'y': 0.0_1_9_9_4, 'z': 0.0_0_0_7_7, } else: # Custom frequencies dictionary lowercase__ = frequencies_dict if not case_sensitive: lowercase__ = ciphertext.lower() # Chi squared statistic values lowercase__ = {} # cycle through all of the shifts for shift in range(len(_SCREAMING_SNAKE_CASE ) ): lowercase__ = '' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet lowercase__ = (alphabet_letters.index(letter.lower() ) - shift) % len( _SCREAMING_SNAKE_CASE ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter lowercase__ = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: lowercase__ = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message lowercase__ = decrypted_with_shift.lower().count(_SCREAMING_SNAKE_CASE ) # Get the expected amount of times the letter should appear based # on letter frequencies lowercase__ = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowercase__ = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message lowercase__ = decrypted_with_shift.count(_SCREAMING_SNAKE_CASE ) # Get the expected amount of times the letter should appear based # on letter frequencies lowercase__ = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowercase__ = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary lowercase__ = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(_SCREAMING_SNAKE_CASE ) -> tuple[float, str]: return chi_squared_statistic_values[key] lowercase__ = min( _SCREAMING_SNAKE_CASE , key=_SCREAMING_SNAKE_CASE , ) # Get all the data from the most likely cipher (key, decoded message) ( ( lowercase__ ) , ( lowercase__ ) , ) = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher,
)
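A compact, runnable sketch of the same chi-squared shift search, assuming a plain lower-case Caesar cipher and reusing the English frequency table from the sample above; all names here are illustrative rather than the dump's obfuscated ones:

from string import ascii_lowercase

ENGLISH_FREQ = {
    "a": 0.08497, "b": 0.01492, "c": 0.02202, "d": 0.04253, "e": 0.11162,
    "f": 0.02228, "g": 0.02015, "h": 0.06094, "i": 0.07546, "j": 0.00153,
    "k": 0.01292, "l": 0.04025, "m": 0.02406, "n": 0.06749, "o": 0.07507,
    "p": 0.01929, "q": 0.00095, "r": 0.07587, "s": 0.06327, "t": 0.09356,
    "u": 0.02758, "v": 0.00978, "w": 0.02560, "x": 0.00150, "y": 0.01994,
    "z": 0.00077,
}

def shift_by(text, shift):
    # Shift lower-case letters cyclically; leave every other character alone.
    return "".join(
        ascii_lowercase[(ascii_lowercase.index(c) + shift) % 26] if c in ascii_lowercase else c
        for c in text
    )

def chi_squared(text):
    letters = [c for c in text if c in ascii_lowercase]
    total = 0.0
    for letter in ascii_lowercase:
        expected = ENGLISH_FREQ[letter] * len(letters)
        observed = letters.count(letter)
        total += (observed - expected) ** 2 / expected
    return total

ciphertext = shift_by("defend the east wall of the castle", 3)
best_shift = min(range(26), key=lambda s: chi_squared(shift_by(ciphertext, -s)))
assert best_shift == 3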
45
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bool: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError('Input series is not valid, valid series - [2, 4, 6]' ) if len(_SCREAMING_SNAKE_CASE ) == 0: raise ValueError('Input list must be a non empty list' ) if len(_SCREAMING_SNAKE_CASE ) == 1: return True lowercase__ = series[1] - series[0] for index in range(len(_SCREAMING_SNAKE_CASE ) - 1 ): if series[index + 1] - series[index] != common_diff: return False return True def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> float: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError('Input series is not valid, valid series - [2, 4, 6]' ) if len(_SCREAMING_SNAKE_CASE ) == 0: raise ValueError('Input list must be a non empty list' ) lowercase__ = 0 for val in series: answer += val return answer / len(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
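A short worked example of the two checks above (common difference and mean) on the series [2, 4, 6, 8]:

series = [2, 4, 6, 8]

# Common-difference check, mirroring the loop in the sample.
common_diff = series[1] - series[0]
assert all(series[i + 1] - series[i] == common_diff for i in range(len(series) - 1))

# Mean, mirroring the accumulation in the sample.
assert sum(series) / len(series) == 5.0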
45
1
from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """nielsr/canine-s""": 2_048, } # Unicode defines 1,114,112 total “codepoints” lowercase_ = 1_114_112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py lowercase_ = 0 lowercase_ = 0xE000 lowercase_ = 0xE001 lowercase_ = 0xE002 lowercase_ = 0xE003 lowercase_ = 0xE004 # Maps special codepoints to human-readable names. lowercase_ = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. lowercase_ = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Optional[Any] , a : Optional[Any]=chr(CLS ) , a : Dict=chr(SEP ) , a : Any=chr(SEP ) , a : Union[str, Any]=chr(CLS ) , a : Optional[Any]=chr(PAD ) , a : Any=chr(MASK ) , a : Optional[int]=False , a : List[Any]=2_048 , **a : Tuple , )-> Tuple: """simple docstring""" lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token # Mask token behaves like a normal word, i.e. includes the space before it lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token super().__init__( bos_token=a , eos_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , add_prefix_space=a , model_max_length=a , **a , ) # Creates a mapping for looking up the IDs of special symbols. lowercase__ = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): lowercase__ = codepoint # Creates a mapping for looking up the string forms of special symbol IDs.
lowercase__ = { codepoint: name for name, codepoint in self._special_codepoints.items() } lowercase__ = UNICODE_VOCAB_SIZE lowercase__ = len(self._special_codepoints ) @property def SCREAMING_SNAKE_CASE_ ( self : int )-> int: """simple docstring""" return self._unicode_vocab_size def SCREAMING_SNAKE_CASE_ ( self : int , a : str )-> List[str]: """simple docstring""" return list(a ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str )-> int: """simple docstring""" try: return ord(a ) except TypeError: raise ValueError(f"""invalid token: '{token}'""" ) def SCREAMING_SNAKE_CASE_ ( self : int , a : int )-> str: """simple docstring""" try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(a ) except TypeError: raise ValueError(f"""invalid id: {index}""" ) def SCREAMING_SNAKE_CASE_ ( self : str , a : List[Any] )-> Tuple: """simple docstring""" return "".join(a ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] lowercase__ = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def SCREAMING_SNAKE_CASE_ ( self : str , a : List[int] , a : Optional[List[int]] = None , a : bool = False )-> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a , token_ids_a=a , already_has_special_tokens=a ) lowercase__ = [1] + ([0] * len(a )) + [1] if token_ids_a is not None: result += ([0] * len(a )) + [1] return result def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] lowercase__ = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : Optional[str] = None )-> Tuple: """simple docstring""" return ()
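A minimal sketch of the character-level encoding this tokenizer performs, assuming the private-use codepoints defined in the sample (CLS = 0xE000, SEP = 0xE001); encode/decode below are illustrative helpers, not the class's actual methods:

CLS, SEP = 0xE000, 0xE001  # private-use codepoints from the sample

def encode(text):
    # One token per character: wrap the raw codepoints with [CLS] ... [SEP].
    return [CLS] + [ord(ch) for ch in text] + [SEP]

def decode(ids):
    specials = {CLS: "[CLS]", SEP: "[SEP]"}
    return "".join(specials.get(i, chr(i)) for i in ids)

ids = encode("hi")
assert ids == [0xE000, 104, 105, 0xE001]
assert decode(ids) == "[CLS]hi[SEP]"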
45
from __future__ import annotations import math from collections.abc import Callable def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 100 , ) -> float: lowercase__ = x_start lowercase__ = fnc(_SCREAMING_SNAKE_CASE ) lowercase__ = 0.0 for _ in range(_SCREAMING_SNAKE_CASE ): # Approximates curve as a sequence of linear lines and sums their length lowercase__ = (x_end - x_start) / steps + xa lowercase__ = fnc(_SCREAMING_SNAKE_CASE ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step lowercase__ = xa lowercase__ = fxa return length if __name__ == "__main__": def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str: return math.sin(10 * x ) print("""f(x) = sin(10 * x)""") print("""The length of the curve from x = -10 to x = 10 is:""") lowercase_ = 10 while i <= 100_000: print(f'''With {i} steps: {line_length(f, -10, 10, i)}''') i *= 10
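A quick sanity check of the polyline approximation above, rewritten with illustrative names: for a straight line the approximation is exact, so f(x) = x over [0, 1] must return sqrt(2):

import math

def line_length(fnc, x_start, x_end, steps=100):
    xa, fxa, length = x_start, fnc(x_start), 0.0
    for _ in range(steps):
        # Advance one step and add the straight-line segment length.
        xb = xa + (x_end - x_start) / steps
        fxb = fnc(xb)
        length += math.hypot(xb - xa, fxb - fxa)
        xa, fxa = xb, fxb
    return length

assert abs(line_length(lambda x: x, 0.0, 1.0) - math.sqrt(2)) < 1e-9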
45
1
from __future__ import annotations import numpy as np def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> tuple[np.ndarray, np.ndarray]: lowercase__ , lowercase__ = np.shape(_SCREAMING_SNAKE_CASE ) if rows != columns: lowercase__ = ( '\'table\' has to be of square shaped array but got a ' F"""{rows}x{columns} array:\n{table}""" ) raise ValueError(_SCREAMING_SNAKE_CASE ) lowercase__ = np.zeros((rows, columns) ) lowercase__ = np.zeros((rows, columns) ) for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): lowercase__ = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) ) if upper[j][j] == 0: raise ArithmeticError('No LU decomposition exists' ) lowercase__ = (table[i][j] - total) / upper[j][j] lowercase__ = 1 for j in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): lowercase__ = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) ) lowercase__ = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
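The placeholder arguments above make the inner ranges ambiguous; a standalone sketch of the standard Doolittle recurrence (inner sums over range(j) for L and range(i) for U are the usual choice), verified by multiplying the factors back together:

import numpy as np

def lu_decompose(table):
    n = table.shape[0]
    lower, upper = np.zeros((n, n)), np.zeros((n, n))
    for i in range(n):
        # Entries of L in row i, left of the diagonal.
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # Entries of U in row i, from the diagonal rightwards.
        for j in range(i, n):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper

A = np.array([[4.0, 3.0], [6.0, 3.0]])
L, U = lu_decompose(A)
assert np.allclose(L @ U, A)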
45
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowercase_ = { """configuration_squeezebert""": [ """SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SqueezeBertConfig""", """SqueezeBertOnnxConfig""", ], """tokenization_squeezebert""": ["""SqueezeBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""SqueezeBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """SqueezeBertForMaskedLM""", """SqueezeBertForMultipleChoice""", """SqueezeBertForQuestionAnswering""", """SqueezeBertForSequenceClassification""", """SqueezeBertForTokenClassification""", """SqueezeBertModel""", """SqueezeBertModule""", """SqueezeBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
45
1
import math def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(_SCREAMING_SNAKE_CASE ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError('This should never happen' ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. lowercase_ = """Enter the base and the power separated by a comma: """ lowercase_ , lowercase_ = map(int, input(prompt).split(""",""")) lowercase_ , lowercase_ = map(int, input(prompt).split(""",""")) # We find the log of each number, using the function res(), which takes two # arguments. lowercase_ = res(xa, ya) lowercase_ = res(xa, ya) # We check for the largest number if resa > resa: print("""Largest number is""", xa, """^""", ya) elif resa > resa: print("""Largest number is""", xa, """^""", ya) else: print("""Both are equal""")
45
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int: lowercase__ = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f'''{solution() = }''')
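An independent cross-check sketch for the tiling count above, using the one-dimensional recurrence f(n) = f(n-1) + f(n-k) for each coloured tile length k in {2, 3, 4} and excluding the all-black row; Project Euler 116 quotes 7 + 3 + 2 = 12 ways for a row of length 5:

def ways_for_tile(length, k):
    # f[n] counts rows of length n built from black unit squares and
    # coloured tiles of length k; the all-black row is excluded at the end.
    f = [0] * (length + 1)
    for n in range(length + 1):
        f[n] = 1 if n < k else f[n - 1] + f[n - k]
    return f[length] - 1

assert sum(ways_for_tile(5, k) for k in (2, 3, 4)) == 12  # 7 red + 3 green + 2 blue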
45
1
from math import isqrt def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bool: return all(number % divisor != 0 for divisor in range(2 , isqrt(_SCREAMING_SNAKE_CASE ) + 1 ) ) def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 10**6 ) -> int: lowercase__ = 0 lowercase__ = 1 lowercase__ = 7 while prime_candidate < max_prime: primes_count += is_prime(_SCREAMING_SNAKE_CASE ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(f'''{solution() = }''')
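The candidate generator above steps through differences of consecutive cubes; a small sketch checking the identity (n + 1)^3 - n^3 = 3n^2 + 3n + 1 against the 7, +6k stepping used in the solution:

candidate, k = 7, 1
for n in range(1, 10):
    # Difference of consecutive cubes, matching the 7, +6k stepping above.
    assert (n + 1) ** 3 - n ** 3 == 3 * n * n + 3 * n + 1 == candidate
    k += 1
    candidate += 6 * k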
45
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class SCREAMING_SNAKE_CASE (UpperCAmelCase ): def __init__( self : Optional[Any] , a : UNetaDModel , a : UNetaDModel , a : DDPMScheduler , a : Any , )-> Dict: """simple docstring""" super().__init__() lowercase__ = value_function lowercase__ = unet lowercase__ = scheduler lowercase__ = env lowercase__ = env.get_dataset() lowercase__ = {} for key in self.data.keys(): try: lowercase__ = self.data[key].mean() except: # noqa: E722 pass lowercase__ = {} for key in self.data.keys(): try: lowercase__ = self.data[key].std() except: # noqa: E722 pass lowercase__ = env.observation_space.shape[0] lowercase__ = env.action_space.shape[0] def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Any , a : int )-> Dict: """simple docstring""" return (x_in - self.means[key]) / self.stds[key] def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : List[str] )-> str: """simple docstring""" return x_in * self.stds[key] + self.means[key] def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Tuple )-> Tuple: """simple docstring""" if type(a ) is dict: return {k: self.to_torch(a ) for k, v in x_in.items()} elif torch.is_tensor(a ): return x_in.to(self.unet.device ) return torch.tensor(a , device=self.unet.device ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Optional[int] , a : Dict , a : Optional[Any] )-> List[Any]: """simple docstring""" for key, val in cond.items(): lowercase__ = val.clone() return x_in def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[Any] , a : Any , a : Optional[Any] , a : Optional[int] )-> List[Any]: """simple docstring""" lowercase__ = x.shape[0] lowercase__ = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model lowercase__ = torch.full((batch_size,) , a , device=self.unet.device , dtype=torch.long ) for _ in range(a ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models lowercase__ = self.value_function(x.permute(0 , 2 , 1 ) , a ).sample lowercase__ = torch.autograd.grad([y.sum()] , [x] )[0] lowercase__ = self.scheduler._get_variance(a ) lowercase__ = torch.exp(0.5 * posterior_variance ) lowercase__ = model_std * grad lowercase__ = 0 lowercase__ = x.detach() lowercase__ = x + scale * grad lowercase__ = self.reset_xa(a , a , self.action_dim ) lowercase__ = self.unet(x.permute(0 , 2 , 1 ) , a ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg lowercase__ = self.scheduler.step(a , a , a , predict_epsilon=a )['prev_sample'] # apply conditions to the trajectory (set the initial state) lowercase__ = self.reset_xa(a , a , self.action_dim ) lowercase__ = self.to_torch(a ) return x, y def __call__( self : Any , a : Tuple , a : int=64 , a : Tuple=32 , a : List[Any]=2 , a : List[str]=0.1 )-> List[Any]: """simple docstring""" lowercase__ = self.normalize(a , 'observations' ) lowercase__ = obs[None].repeat(a , axis=0 ) lowercase__ = {0: self.to_torch(a )} lowercase__ = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) lowercase__ = randn_tensor(a , device=self.unet.device ) lowercase__ = self.reset_xa(a , a , self.action_dim ) lowercase__ = self.to_torch(a ) # run the diffusion process lowercase__ , lowercase__ = self.run_diffusion(a , a , a , a ) 
# sort output trajectories by value lowercase__ = y.argsort(0 , descending=a ).squeeze() lowercase__ = x[sorted_idx] lowercase__ = sorted_values[:, :, : self.action_dim] lowercase__ = actions.detach().cpu().numpy() lowercase__ = self.de_normalize(a , key='actions' ) # select the action with the highest value if y is not None: lowercase__ = 0 else: # if we didn't run value guiding, select a random action lowercase__ = np.random.randint(0 , a ) lowercase__ = denorm_actions[selected_index, 0] return denorm_actions
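A stripped-down sketch of the value-guidance update inside run_diffusion above: differentiate a value model with respect to the sample and nudge the sample along the gradient. The quadratic value_function and the fixed scale below are toy stand-ins for the learned value network and the posterior standard deviation:

import torch

def value_function(x):
    # Toy stand-in for the learned value model; maximized at x == 2.
    return -(x - 2.0).pow(2).sum(dim=-1)

x = torch.zeros(4, 3)
scale = 0.1
for _ in range(50):
    x.requires_grad_(True)
    y = value_function(x).sum()
    grad = torch.autograd.grad(y, x)[0]
    x = (x + scale * grad).detach()  # gradient ascent on the value

assert torch.allclose(x, torch.full_like(x, 2.0), atol=1e-2)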
45
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/deit-base-distilled-patch16-224""": ( """https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json""" ), # See all DeiT models at https://huggingface.co/models?filter=deit } class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : Any = 'deit' def __init__( self : Any , a : Union[str, Any]=768 , a : Optional[Any]=12 , a : Union[str, Any]=12 , a : Optional[int]=3_072 , a : Optional[int]="gelu" , a : Optional[Any]=0.0 , a : List[Any]=0.0 , a : int=0.02 , a : List[str]=1E-1_2 , a : Optional[int]=224 , a : Tuple=16 , a : List[Any]=3 , a : List[str]=True , a : Any=16 , **a : Union[str, Any] , )-> int: """simple docstring""" super().__init__(**a ) lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = qkv_bias lowercase__ = encoder_stride class SCREAMING_SNAKE_CASE (UpperCAmelCase ): _UpperCamelCase : List[Any] = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE_ ( self : int )-> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def SCREAMING_SNAKE_CASE_ ( self : Any )-> float: """simple docstring""" return 1E-4
45
from PIL import Image def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Image: def brightness(_SCREAMING_SNAKE_CASE ) -> float: return 128 + level + (c - 128) if not -2_5_5.0 <= level <= 2_5_5.0: raise ValueError('level must be between -255.0 (black) and 255.0 (white)' ) return img.point(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change brightness to 100 lowercase_ = change_brightness(img, 100) brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
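A self-contained check of the point-wise brightness shift above, assuming a 1x1 grayscale image so the result can be asserted directly (Image.point applies the mapping to every pixel value):

from PIL import Image

level = 100
img = Image.new("L", (1, 1), color=50)
brightened = img.point(lambda c: 128 + level + (c - 128))
assert brightened.getpixel((0, 0)) == 150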
45
1
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed lowercase_ = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''): from run_translation import main # noqa set_seed(42) lowercase_ = """sshleifer/student_marian_en_ro_6_1""" lowercase_ = """sshleifer/tiny-mbart""" @require_torch class SCREAMING_SNAKE_CASE (UpperCAmelCase ): def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Union[str, Any]=False , a : Optional[Any]=None , a : int=True , a : List[Any]=True , a : Optional[int]=True , a : Optional[Any]=True , )-> Optional[Any]: """simple docstring""" lowercase__ = self.run_trainer( eval_steps=1 , max_len=12 , model_name=a , num_train_epochs=1 , distributed=a , extra_args_str=a , predict_with_generate=a , do_train=a , do_eval=a , do_predict=a , ) lowercase__ = TrainerState.load_from_json(os.path.join(a , 'trainer_state.json' ) ).log_history if not do_eval: return lowercase__ = [log for log in logs if 'eval_loss' in log.keys()] lowercase__ = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats lowercase__ = eval_metrics[-1] assert isinstance(last_step_stats['eval_bleu'] , a ) assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def SCREAMING_SNAKE_CASE_ ( self : str )-> Any: """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def SCREAMING_SNAKE_CASE_ ( self : Dict )-> int: """simple docstring""" self.run_seqaseq_quick(distributed=a ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict: """simple docstring""" self.run_seqaseq_quick(distributed=a ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE_ ( self : Any )-> int: """simple docstring""" self.run_seqaseq_quick(distributed=a , extra_args_str='--sharded_ddp simple' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]: """simple docstring""" self.run_seqaseq_quick(distributed=a , extra_args_str='--sharded_ddp simple --fp16' ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]: """simple docstring""" self.run_seqaseq_quick(distributed=a , extra_args_str='--sharded_ddp zero_dp_2' , predict_with_generate=a ) @unittest.skip('Requires an update of the env running those tests' ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> int: """simple docstring""" self.run_seqaseq_quick( distributed=a , extra_args_str='--sharded_ddp zero_dp_2 --fp16' , predict_with_generate=a ) @require_apex @require_torch_gpu def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Dict: """simple docstring""" self.run_seqaseq_quick(distributed=a , extra_args_str='--fp16 
--fp16_backend=apex' ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=a , extra_args_str='--fp16 --fp16_backend=apex' ) @parameterized.expand(['base', 'low', 'high', 'mixed'] ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : int )-> int: """simple docstring""" lowercase__ = { # test with the default log_level - should be info and thus log info once 'base': {'extra_args_str': '', 'n_matches': 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes 'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica 'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1}, # test with high log_level and log_level_replica - should be quiet on all processes 'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0}, } lowercase__ = experiments[experiment_id] lowercase__ = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False} lowercase__ = 'Running training' with CaptureStderr() as cl: self.run_seqaseq_quick(**a , extra_args_str=data['extra_args_str'] ) lowercase__ = len(re.findall(a , cl.err ) ) self.assertEqual(a , data['n_matches'] ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]: """simple docstring""" lowercase__ = self.run_trainer( eval_steps=2 , max_len=128 , model_name=a , learning_rate=3E-4 , num_train_epochs=10 , distributed=a , ) # Check metrics lowercase__ = TrainerState.load_from_json(os.path.join(a , 'trainer_state.json' ) ).log_history lowercase__ = [log for log in logs if 'eval_loss' in log.keys()] lowercase__ = eval_metrics[0] lowercase__ = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats['eval_bleu'] , a ) # test if do_predict saves generations and metrics lowercase__ = os.listdir(a ) lowercase__ = {os.path.basename(a ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Dict: """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(a : str ) -> Tuple[int, float]: lowercase__ = '--skip_memory_metrics 0' lowercase__ = self.run_trainer( max_len=128 , model_name=a , learning_rate=3E-4 , num_train_epochs=1 , optim=a , distributed=a , extra_args_str=a , do_eval=a , do_predict=a , n_gpus_to_use=1 , ) # Check metrics lowercase__ = TrainerState.load_from_json(Path(a , 'trainer_state.json' ) ).log_history lowercase__ = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 ) lowercase__ = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 ) lowercase__ = logs[0]['train_loss'] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss lowercase__ , lowercase__ , lowercase__ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) lowercase__ , lowercase__ , lowercase__ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) lowercase__ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb lowercase__ = gpu_peak_mem_orig + gpu_alloc_mem_orig lowercase__ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb lowercase__ = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 
54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings lowercase__ = 120 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( a , a , 'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got' f""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and""" f""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , ) self.assertGreater( a , a , 'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got' f""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and""" f""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , ) self.assertEqual( a , a , f"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" ) def SCREAMING_SNAKE_CASE_ ( self : int , a : int , a : str , a : int , a : float = 3E-3 , a : str = "adafactor" , a : bool = False , a : str = None , a : int = 0 , a : bool = True , a : bool = True , a : bool = True , a : bool = True , a : int = None , )-> Dict: """simple docstring""" lowercase__ = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro' lowercase__ = self.get_auto_remove_tmp_dir() lowercase__ = f""" --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(a )} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(a )} --group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX """.split() lowercase__ = f""" --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(a )} """.split() lowercase__ = '\n --do_predict\n '.split() lowercase__ = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += f"""--optim {optim}""".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: lowercase__ = get_gpu_count() lowercase__ = get_torch_dist_unique_port() lowercase__ = f""" -m torch.distributed.run --nproc_per_node={n_gpus_to_use} 
--master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py """.split() lowercase__ = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(a , env=self.get_env() ) else: lowercase__ = ['run_translation.py'] + args with patch.object(a , 'argv' , a ): main() return output_dir
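The ~150MB savings estimated in the comments above is plain arithmetic; a sketch assuming roughly 25M quantizable parameters and optimizer state shrinking from 8 bytes to 2 bytes per parameter:

params = 25_000_000  # roughly the quantizable parameters mentioned above
bytes_saved_per_param = 8 - 2  # fp32 Adam state vs 8-bit optimizer state
savings_mb = params * bytes_saved_per_param / 2**20
assert 140 < savings_mb < 160  # ~143 MB, consistent with the 120 MB margin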
45
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class SCREAMING_SNAKE_CASE (unittest.TestCase ): def __init__( self : Any , a : str , a : List[Any]=7 , a : int=3 , a : int=18 , a : Optional[Any]=30 , a : Optional[int]=400 , a : int=True , a : Tuple=None , a : Optional[Any]=True , a : str=False , a : str=True , a : int=True , a : Tuple=[0.5, 0.5, 0.5] , a : Any=[0.5, 0.5, 0.5] , )-> Optional[int]: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = do_resize lowercase__ = size if size is not None else {'height': 18, 'width': 20} lowercase__ = do_thumbnail lowercase__ = do_align_axis lowercase__ = do_pad lowercase__ = do_normalize lowercase__ = image_mean lowercase__ = image_std def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : Optional[Any] = DonutImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]: """simple docstring""" lowercase__ = DonutImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self : Any )-> int: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a , 'do_resize' ) ) self.assertTrue(hasattr(a , 'size' ) ) self.assertTrue(hasattr(a , 'do_thumbnail' ) ) self.assertTrue(hasattr(a , 'do_align_long_axis' ) ) self.assertTrue(hasattr(a , 'do_pad' ) ) self.assertTrue(hasattr(a , 'do_normalize' ) ) self.assertTrue(hasattr(a , 'image_mean' ) ) self.assertTrue(hasattr(a , 'image_std' ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict: """simple docstring""" lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'height': 18, 'width': 20} ) lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'height': 42, 'width': 42} ) # Previous config had dimensions in (width, height) order lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'height': 84, 'width': 42} ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict: """simple docstring""" pass @is_flaky() def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a ) for image in image_inputs: self.assertIsInstance(a , 
Image.Image ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a ) for image in image_inputs: self.assertIsInstance(a , np.ndarray ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) @is_flaky() def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a ) for image in image_inputs: self.assertIsInstance(a , torch.Tensor ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
45
1
import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor lowercase_ = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE (UpperCAmelCase ): def __init__( self : str , *a : Any , **a : int )-> None: """simple docstring""" warnings.warn( 'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use MobileViTImageProcessor instead.' , a , ) super().__init__(*a , **a )
45
import math def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(_SCREAMING_SNAKE_CASE ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError('This should never happen' ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. lowercase_ = """Enter the base and the power separated by a comma: """ lowercase_ , lowercase_ = map(int, input(prompt).split(""",""")) lowercase_ , lowercase_ = map(int, input(prompt).split(""",""")) # We find the log of each number, using the function res(), which takes two # arguments. lowercase_ = res(xa, ya) lowercase_ = res(xa, ya) # We check for the largest number if resa > resa: print("""Largest number is""", xa, """^""", ya) elif resa > resa: print("""Largest number is""", xa, """^""", ya) else: print("""Both are equal""")
45
1
from math import sqrt def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 1000000 ) -> int: lowercase__ = 0 lowercase__ = 0 lowercase__ = 42 while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(_SCREAMING_SNAKE_CASE , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f'''{solution() = }''')
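The integer test above relies on the shortest surface route across an a x b x c cuboid being sqrt((a + b)^2 + c^2) for the best unfolding; a sketch checking the classic 6 x 5 x 3 cuboid, whose shortest route is exactly 10:

from math import sqrt

def shortest_route(a, b, c):
    # Unfold the cuboid three ways and keep the shortest straight-line path.
    return min(
        sqrt((a + b) ** 2 + c ** 2),
        sqrt((a + c) ** 2 + b ** 2),
        sqrt((b + c) ** 2 + a ** 2),
    )

assert shortest_route(6, 5, 3) == 10.0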
45
class SCREAMING_SNAKE_CASE : # Public class to implement a graph def __init__( self : int , a : int , a : int , a : list[list[bool]] )-> None: """simple docstring""" lowercase__ = row lowercase__ = col lowercase__ = graph def SCREAMING_SNAKE_CASE_ ( self : Dict , a : int , a : int , a : list[list[bool]] )-> bool: """simple docstring""" return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : int , a : int , a : list[list[bool]] )-> None: """simple docstring""" lowercase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order lowercase__ = [-1, 0, 1, -1, 1, -1, 0, 1] lowercase__ = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , a ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , a ) def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> int: # And finally, count all islands. """simple docstring""" lowercase__ = [[False for j in range(self.COL )] for i in range(self.ROW )] lowercase__ = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(a , a , a ) count += 1 return count
45
1
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class SCREAMING_SNAKE_CASE : def __init__( self : Optional[int] , a : List[str] , a : int=13 , a : List[Any]=7 , a : Optional[Any]=True , a : Dict=True , a : str=99 , a : str=32 , a : int=5 , a : int=4 , a : Dict=37 , a : Optional[int]="gelu" , a : Any=0.1 , a : Any=0.1 , a : int=50 , a : Optional[Any]=0.02 , a : Any=True , a : Optional[int]=None , )-> Any: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_input_mask lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = initializer_range lowercase__ = use_labels lowercase__ = scope def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> str: """simple docstring""" lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ = None if self.use_input_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ = self.get_config() return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Optional[Any]: """simple docstring""" return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=a , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Any: """simple docstring""" ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) = self.prepare_config_and_inputs() lowercase__ = True lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : int , a : List[Any] , a : List[Any] , a : Dict , **a : List[Any] , )-> Dict: """simple docstring""" lowercase__ = BertGenerationEncoder(config=a ) model.to(a ) model.eval() lowercase__ = model(a , attention_mask=a ) lowercase__ = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : int , a : List[str] , a : Any , a : Dict , a : int , a : Any , a : int , **a : Optional[int] , )-> Dict: """simple docstring""" lowercase__ = True lowercase__ = BertGenerationEncoder(config=a ) model.to(a ) 
model.eval() lowercase__ = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , ) lowercase__ = model( a , attention_mask=a , encoder_hidden_states=a , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : Any , a : Optional[int] , a : Union[str, Any] , a : Optional[Any] , a : Any , a : List[str] , a : Any , **a : List[Any] , )-> Optional[Any]: """simple docstring""" lowercase__ = True lowercase__ = True lowercase__ = BertGenerationDecoder(config=a ).to(a ).eval() # first forward pass lowercase__ = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , ) lowercase__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowercase__ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowercase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowercase__ = torch.cat([input_mask, next_mask] , dim=-1 ) lowercase__ = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0] lowercase__ = model( a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0] # select random slice lowercase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowercase__ = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , a : List[str] , a : Optional[int] , a : int , a : Any , *a : Union[str, Any] , )-> Tuple: """simple docstring""" lowercase__ = BertGenerationDecoder(a ) model.to(a ) model.eval() lowercase__ = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Any: """simple docstring""" lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.prepare_config_and_inputs() lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : int = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () _UpperCamelCase : Union[str, Any] = (BertGenerationDecoder,) if is_torch_available() else () _UpperCamelCase : str = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]: """simple docstring""" lowercase__ = BertGenerationEncoderTester(self ) lowercase__ = ConfigTester(self , config_class=a , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : str )-> Tuple: """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self : int )-> str: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def SCREAMING_SNAKE_CASE_ ( self : str )-> List[str]: """simple docstring""" lowercase__ , lowercase__ , lowercase__ , lowercase__ = 
self.model_tester.prepare_config_and_inputs() lowercase__ = 'bert' self.model_tester.create_and_check_model(a , a , a , a ) def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*a ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> List[Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*a ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]: """simple docstring""" ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase__ = None self.model_tester.create_and_check_model_as_decoder( a , a , a , a , a , a , ) def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*a ) @slow def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]: """simple docstring""" lowercase__ = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(a ) @require_torch class SCREAMING_SNAKE_CASE (unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[Any]: """simple docstring""" lowercase__ = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]] ) with torch.no_grad(): lowercase__ = model(a )[0] lowercase__ = torch.Size([1, 8, 1_024] ) self.assertEqual(output.shape , a ) lowercase__ = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1E-4 ) ) @require_torch class SCREAMING_SNAKE_CASE (unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Any: """simple docstring""" lowercase__ = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]] ) with torch.no_grad(): lowercase__ = model(a )[0] lowercase__ = torch.Size([1, 8, 50_358] ) self.assertEqual(output.shape , a ) lowercase__ = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1E-4 ) )
45
from string import ascii_uppercase lowercase_ = {str(ord(c) - 55): c for c in ascii_uppercase} def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('int() can\'t convert non-string with explicit base' ) if num < 0: raise ValueError('parameter must be positive int' ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('\'str\' object cannot be interpreted as an integer' ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('\'float\' object cannot be interpreted as an integer' ) if base in (0, 1): raise ValueError('base must be >= 2' ) if base > 36: raise ValueError('base must be <= 36' ) lowercase__ = '' lowercase__ = 0 lowercase__ = 0 while div != 1: lowercase__ , lowercase__ = divmod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if base >= 11 and 9 < mod < 36: lowercase__ = ALPHABET_VALUES[str(_SCREAMING_SNAKE_CASE )] else: lowercase__ = str(_SCREAMING_SNAKE_CASE ) new_value += actual_value lowercase__ = num // base lowercase__ = div if div == 0: return str(new_value[::-1] ) elif div == 1: new_value += str(_SCREAMING_SNAKE_CASE ) return str(new_value[::-1] ) return new_value[::-1] if __name__ == "__main__": import doctest doctest.testmod() for base in range(2, 37): for num in range(1_000): assert int(decimal_to_any(num, base), base) == num, ( num, base, decimal_to_any(num, base), int(decimal_to_any(num, base), base), )
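A compact round-trip sketch of the base conversion above, assuming the same digit alphabet of 0-9 followed by A-Z (the ord(c) - 55 mapping sends 'A' to 10), cross-checked with Python's int parser:

from string import ascii_uppercase, digits

ALPHABET = digits + ascii_uppercase  # digit value -> character, 'A' is 10

def to_base(num, base):
    out = ""
    while num > 0:
        num, mod = divmod(num, base)
        out = ALPHABET[mod] + out
    return out or "0"

assert to_base(255, 16) == "FF"
assert int(to_base(12_345, 36), 36) == 12_345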
45
1
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str: if number > 0: raise ValueError('input must be a negative integer' ) lowercase__ = len(bin(_SCREAMING_SNAKE_CASE )[3:] ) lowercase__ = bin(abs(_SCREAMING_SNAKE_CASE ) - (1 << binary_number_length) )[3:] lowercase__ = ( ( '1' + '0' * (binary_number_length - len(_SCREAMING_SNAKE_CASE )) + twos_complement_number ) if number < 0 else '0' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
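The string built above can be validated with the defining identity of two's complement: reading the bits as an unsigned integer and subtracting 2^width must recover the negative input. A standalone sketch mirroring the construction:

def twos_complement(number):
    # Mirrors the construction above for strictly negative input.
    assert number < 0
    width = len(bin(number)[3:])
    tail = bin(abs(number) - (1 << width))[3:]
    return "0b1" + "0" * (width - len(tail)) + tail

for n in range(-1, -17, -1):
    bits = twos_complement(n)[2:]
    # Unsigned value of the bits minus 2**width recovers the input.
    assert int(bits, 2) - (1 << len(bits)) == n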
45
import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, )
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class SCREAMING_SNAKE_CASE :
    def __init__( self : List[Any] , a : Any , a : Optional[int]=13 , a : Tuple=30 , a : Union[str, Any]=2 , a : List[str]=3 , a : Dict=True , a : List[str]=True , a : List[Any]=32 , a : List[str]=5 , a : Optional[int]=4 , a : List[str]=37 , a : Dict="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : int=10 , a : List[str]=0.02 , a : int=None , a : List[str]=2 , )-> Dict:
        """simple docstring"""
        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = image_size
        lowercase__ = patch_size
        lowercase__ = num_channels
        lowercase__ = is_training
        lowercase__ = use_labels
        lowercase__ = hidden_size
        lowercase__ = num_hidden_layers
        lowercase__ = num_attention_heads
        lowercase__ = intermediate_size
        lowercase__ = hidden_act
        lowercase__ = hidden_dropout_prob
        lowercase__ = attention_probs_dropout_prob
        lowercase__ = type_sequence_label_size
        lowercase__ = initializer_range
        lowercase__ = scope
        lowercase__ = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowercase__ = (image_size // patch_size) ** 2
        lowercase__ = num_patches + 1

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
        """simple docstring"""
        lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        lowercase__ = None
        if self.use_labels:
            lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        lowercase__ = self.get_config()

        return config, pixel_values, labels

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]:
        """simple docstring"""
        return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[Any] , a : List[str] , a : Dict )-> Optional[Any]:
        """simple docstring"""
        lowercase__ = ViTModel(config=a )
        model.to(a )
        model.eval()
        lowercase__ = model(a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[Any] , a : int , a : Tuple )-> Union[str, Any]:
        """simple docstring"""
        lowercase__ = ViTForMaskedImageModeling(config=a )
        model.to(a )
        model.eval()
        lowercase__ = model(a )
        self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        lowercase__ = 1
        lowercase__ = ViTForMaskedImageModeling(a )
        model.to(a )
        model.eval()

        lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowercase__ = model(a )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[str] , a : int , a : List[Any] )-> str:
        """simple docstring"""
        lowercase__ = self.type_sequence_label_size
        lowercase__ = ViTForImageClassification(a )
        model.to(a )
        model.eval()
        lowercase__ = model(a , labels=a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        lowercase__ = 1
        lowercase__ = ViTForImageClassification(a )
        model.to(a )
        model.eval()

        lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowercase__ = model(a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
        """simple docstring"""
        lowercase__ = self.prepare_config_and_inputs()
        ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) = config_and_inputs
        lowercase__ = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    _UpperCamelCase : Any = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () )
    _UpperCamelCase : Union[str, Any] = ( {'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification} if is_torch_available() else {} )

    _UpperCamelCase : int = True
    _UpperCamelCase : int = False
    _UpperCamelCase : Union[str, Any] = False
    _UpperCamelCase : Dict = False

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
        """simple docstring"""
        lowercase__ = ViTModelTester(self )
        lowercase__ = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
        """simple docstring"""
        pass

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]:
        """simple docstring"""
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowercase__ = model_class(a )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowercase__ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(a , nn.Linear ) )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]:
        """simple docstring"""
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowercase__ = model_class(a )
            lowercase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase__ = [*signature.parameters.keys()]

            lowercase__ = ['pixel_values']
            self.assertListEqual(arg_names[:1] , a )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]:
        """simple docstring"""
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
        """simple docstring"""
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*a )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int:
        """simple docstring"""
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]:
        """simple docstring"""
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = ViTModel.from_pretrained(a )
            self.assertIsNotNone(a )


def __UpperCamelCase () -> str:
    lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    @cached_property
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
        """simple docstring"""
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
        """simple docstring"""
        lowercase__ = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(a )

        lowercase__ = self.default_image_processor
        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=a , return_tensors='pt' ).to(a )

        # forward pass
        with torch.no_grad():
            lowercase__ = model(**a )

        # verify the logits
        lowercase__ = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , a )

        lowercase__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(a )

        self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )

    @slow
    def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]:
        """simple docstring"""
        lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' ).to(a )

        lowercase__ = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=a , return_tensors='pt' )
        lowercase__ = inputs.pixel_values.to(a )

        # forward pass
        with torch.no_grad():
            lowercase__ = model(a , interpolate_pos_encoding=a )

        # verify the logits
        lowercase__ = torch.Size((1, 3_601, 384) )
        self.assertEqual(outputs.last_hidden_state.shape , a )

        lowercase__ = torch.tensor( [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(a )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) )

    @slow
    @require_accelerate
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE_ ( self : str )-> str:
        """simple docstring"""
        lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' )
        lowercase__ = self.default_image_processor

        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=a , return_tensors='pt' )
        lowercase__ = inputs.pixel_values.to(a )

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            lowercase__ = model(a )
45
1
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
    _UpperCamelCase : List[str] = KandinskyInpaintPipeline
    _UpperCamelCase : Optional[Any] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    _UpperCamelCase : List[Any] = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ]
    _UpperCamelCase : int = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ]
    _UpperCamelCase : str = False

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
        """simple docstring"""
        return 32

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Any )-> List[Any]:
        """simple docstring"""
        return 32

    @property
    def SCREAMING_SNAKE_CASE_ ( self : int )-> Any:
        """simple docstring"""
        return self.time_input_dim

    @property
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]:
        """simple docstring"""
        return 100

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> int:
        """simple docstring"""
        lowercase__ = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
        return tokenizer

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Optional[Any]:
        """simple docstring"""
        torch.manual_seed(0 )
        lowercase__ = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )

        lowercase__ = MultilingualCLIP(a )
        lowercase__ = text_encoder.eval()

        return text_encoder

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[Any]:
        """simple docstring"""
        torch.manual_seed(0 )
        lowercase__ = {
            'in_channels': 9,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }

        lowercase__ = UNetaDConditionModel(**a )
        return model

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[int]:
        """simple docstring"""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ],
            "vq_embed_dim": 4,
        }

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[str]:
        """simple docstring"""
        torch.manual_seed(0 )
        lowercase__ = VQModel(**self.dummy_movq_kwargs )
        return model

    def SCREAMING_SNAKE_CASE_ ( self : Dict )-> int:
        """simple docstring"""
        lowercase__ = self.dummy_text_encoder
        lowercase__ = self.dummy_tokenizer
        lowercase__ = self.dummy_unet
        lowercase__ = self.dummy_movq

        lowercase__ = DDIMScheduler( num_train_timesteps=1_000 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=a , set_alpha_to_one=a , steps_offset=1 , prediction_type='epsilon' , thresholding=a , )

        lowercase__ = { 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'unet': unet, 'scheduler': scheduler, 'movq': movq, }
        return components

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : Tuple , a : Any=0 )-> Optional[Any]:
        """simple docstring"""
        lowercase__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a ) ).to(a )
        lowercase__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a )

        # create init_image
        lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(a ) ).to(a )
        lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase__ = Image.fromarray(np.uinta(a ) ).convert('RGB' ).resize((256, 256) )

        # create mask
        lowercase__ = np.ones((64, 64) , dtype=np.floataa )
        lowercase__ = 0

        if str(a ).startswith('mps' ):
            lowercase__ = torch.manual_seed(a )
        else:
            lowercase__ = torch.Generator(device=a ).manual_seed(a )
        lowercase__ = { 'prompt': 'horse', 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', }
        return inputs

    def SCREAMING_SNAKE_CASE_ ( self : int )-> str:
        """simple docstring"""
        lowercase__ = 'cpu'

        lowercase__ = self.get_dummy_components()

        lowercase__ = self.pipeline_class(**a )
        lowercase__ = pipe.to(a )

        pipe.set_progress_bar_config(disable=a )

        lowercase__ = pipe(**self.get_dummy_inputs(a ) )
        lowercase__ = output.images

        lowercase__ = pipe( **self.get_dummy_inputs(a ) , return_dict=a , )[0]

        lowercase__ = image[0, -3:, -3:, -1]
        lowercase__ = image_from_tuple[0, -3:, -3:, -1]

        print(f"""image.shape {image.shape}""" )

        assert image.shape == (1, 64, 64, 3)

        lowercase__ = np.array( [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )

        assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""

    def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[str]:
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )


@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Optional[Any]:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Dict:
        """simple docstring"""
        lowercase__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )

        lowercase__ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        lowercase__ = np.ones((768, 768) , dtype=np.floataa )
        lowercase__ = 0

        lowercase__ = 'a hat'

        lowercase__ = KandinskyPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
        pipe_prior.to(a )

        lowercase__ = KandinskyInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa )
        lowercase__ = pipeline.to(a )
        pipeline.set_progress_bar_config(disable=a )

        lowercase__ = torch.Generator(device='cpu' ).manual_seed(0 )
        lowercase__ , lowercase__ = pipe_prior( a , generator=a , num_inference_steps=5 , negative_prompt='' , ).to_tuple()

        lowercase__ = pipeline( a , image=a , mask_image=a , image_embeds=a , negative_image_embeds=a , generator=a , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )

        lowercase__ = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(a , a )
45
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
    stooge(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) - 1 )
    return arr


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
    if i >= h:
        return

    # If the first element is larger than the last, swap them so the pair is in order
    if arr[i] > arr[h]:
        lowercase__ , lowercase__ = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        lowercase__ = (int)((h - i + 1) / 3 )

        # Recursively sort first 2/3 elements
        stooge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (h - t) )

        # Recursively sort last 2/3 elements
        stooge(_SCREAMING_SNAKE_CASE , i + t , (_SCREAMING_SNAKE_CASE) )

        # Recursively sort first 2/3 elements
        stooge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (h - t) )


if __name__ == "__main__":
    lowercase_ = input("""Enter numbers separated by a comma:\n""").strip()
    lowercase_ = [int(item) for item in user_input.split(""",""")]
    print(stooge_sort(unsorted))
45
1
import unittest

from knapsack import greedy_knapsack as kp


class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Dict:
        """simple docstring"""
        lowercase__ = [10, 20, 30, 40, 50, 60]
        lowercase__ = [2, 4, 6, 8, 10, 12]
        lowercase__ = 100
        self.assertEqual(kp.calc_profit(a , a , a ) , 210 )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
        """simple docstring"""
        self.assertRaisesRegex(a , 'max_weight must greater than zero.' )

    def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[Any]:
        """simple docstring"""
        self.assertRaisesRegex(a , 'Weight can not be negative.' )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
        """simple docstring"""
        self.assertRaisesRegex(a , 'Profit can not be negative.' )

    def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[str]:
        """simple docstring"""
        self.assertRaisesRegex(a , 'max_weight must greater than zero.' )

    def SCREAMING_SNAKE_CASE_ ( self : str )-> Union[str, Any]:
        """simple docstring"""
        self.assertRaisesRegex( a , 'The length of profit and weight must be same.' )


if __name__ == "__main__":
    unittest.main()
45
from scipy.stats import spearmanr

import datasets


lowercase_ = """
The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets.
Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative
correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic
relationship.

Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally
distributed.

The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman
correlation at least as extreme as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""

lowercase_ = """
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {'spearmanr': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                     predictions=[10, 9, 2.5, 6, 4],
        ...                                     return_pvalue=True)
        >>> print(results['spearmanr'])
        -0.7
        >>> print(round(results['spearmanr_pvalue'], 2))
        0.19
"""

lowercase_ = R"""\
@book{kokoska2000crc,
    title={CRC standard probability and statistics tables and formulae},
    author={Kokoska, Stephen and Zwillinger, Daniel},
    year={2000},
    publisher={Crc Press}
}
@article{2020SciPy-NMeth,
    author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
        Haberland, Matt and Reddy, Tyler and Cournapeau, David and
        Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
        Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
        Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
        Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
        Kern, Robert and Larson, Eric and Carey, C J and
        Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
        {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
        Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
        Harris, Charles R. and Archibald, Anne M. and
        Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
        {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
    title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
        Computing in Python}},
    journal = {Nature Methods},
    year    = {2020},
    volume  = {17},
    pages   = {261--272},
    adsurl  = {https://rdcu.be/b08Wh},
    doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE (datasets.Metric ):
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple:
        """simple docstring"""
        return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )

    def SCREAMING_SNAKE_CASE_ ( self : int , a : str , a : Any , a : str=False )-> Optional[int]:
        """simple docstring"""
        lowercase__ = spearmanr(a , a )

        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
45
1
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPTaLMHeadModel


lowercase_ = logging.getLogger(__name__)


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
    # save results
    if os.path.exists(_SCREAMING_SNAKE_CASE ):
        if os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ) and os.path.isfile( os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ):
            os.remove(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) )
        if os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ):
            os.remove(os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) )
    else:
        os.makedirs(_SCREAMING_SNAKE_CASE )
    model.save_pretrained(_SCREAMING_SNAKE_CASE )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
    lowercase__ = 2
    if unlogit:
        lowercase__ = torch.pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    lowercase__ = p * torch.log(_SCREAMING_SNAKE_CASE )
    lowercase__ = 0
    return -plogp.sum(dim=-1 )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict:
    logger.info('lv, h >\t' + '\t'.join(F"""{x + 1}""" for x in range(len(_SCREAMING_SNAKE_CASE ) ) ) )
    for row in range(len(_SCREAMING_SNAKE_CASE ) ):
        if tensor.dtype != torch.long:
            logger.info(F"""layer {row + 1}:\t""" + '\t'.join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
        else:
            logger.info(F"""layer {row + 1}:\t""" + '\t'.join(F"""{x:d}""" for x in tensor[row].cpu().data ) )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) -> str:
    lowercase__ , lowercase__ = model.config.num_hidden_layers, model.config.num_attention_heads
    lowercase__ = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device )
    lowercase__ = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device )

    if head_mask is None:
        lowercase__ = torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device )

    head_mask.requires_grad_(requires_grad=_SCREAMING_SNAKE_CASE )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        lowercase__ = None

    lowercase__ = 0.0
    lowercase__ = 0.0
    for step, inputs in enumerate(tqdm(_SCREAMING_SNAKE_CASE , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
        lowercase__ = tuple(t.to(args.device ) for t in inputs )
        ((lowercase__) , ) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        lowercase__ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        lowercase__ , lowercase__ , lowercase__ = ( outputs[0], outputs[1], outputs[-1], )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(_SCREAMING_SNAKE_CASE ):
                lowercase__ = entropy(attn.detach() , _SCREAMING_SNAKE_CASE )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(_SCREAMING_SNAKE_CASE ).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        lowercase__ = 2
        lowercase__ = torch.pow(torch.pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20

    if not args.dont_normalize_global_importance:
        lowercase__ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies' )
        print_ad_tensor(_SCREAMING_SNAKE_CASE )
    if compute_importance:
        logger.info('Head importance scores' )
        print_ad_tensor(_SCREAMING_SNAKE_CASE )
    logger.info('Head ranked by importance scores' )
    lowercase__ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    lowercase__ = torch.arange( head_importance.numel() , device=args.device )
    lowercase__ = head_ranks.view_as(_SCREAMING_SNAKE_CASE )
    print_ad_tensor(_SCREAMING_SNAKE_CASE )
    return attn_entropy, head_importance, total_loss


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
    lowercase__ , lowercase__ , lowercase__ = compute_heads_importance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE )
    lowercase__ = 1 / loss  # instead of downstream score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f' , _SCREAMING_SNAKE_CASE , original_score * args.masking_threshold )

    lowercase__ = torch.ones_like(_SCREAMING_SNAKE_CASE )
    lowercase__ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )

    lowercase__ = original_score
    while current_score >= original_score * args.masking_threshold:
        lowercase__ = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        lowercase__ = float('Inf' )
        lowercase__ = head_importance.view(-1 ).sort()[1]

        if len(_SCREAMING_SNAKE_CASE ) <= num_to_mask:
            print('BREAK BY num_to_mask' )
            break

        # mask heads
        lowercase__ = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
        lowercase__ = new_head_mask.view(-1 )
        lowercase__ = 0.0
        lowercase__ = new_head_mask.view_as(_SCREAMING_SNAKE_CASE )
        lowercase__ = new_head_mask.clone().detach()
        print_ad_tensor(_SCREAMING_SNAKE_CASE )

        # Compute metric and head importance again
        lowercase__ , lowercase__ , lowercase__ = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE )
        lowercase__ = 1 / loss
        logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , _SCREAMING_SNAKE_CASE , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )

    logger.info('Final head mask' )
    print_ad_tensor(_SCREAMING_SNAKE_CASE )
    np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )

    return head_mask


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
    lowercase__ = datetime.now()
    lowercase__ , lowercase__ , lowercase__ = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , compute_importance=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE )
    lowercase__ = 1 / loss
    lowercase__ = datetime.now() - before_time

    lowercase__ = sum(p.numel() for p in model.parameters() )
    lowercase__ = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_SCREAMING_SNAKE_CASE ) ) }

    for k, v in heads_to_prune.items():
        if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            lowercase__ = [ v, ]

    assert sum(len(_SCREAMING_SNAKE_CASE ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(_SCREAMING_SNAKE_CASE )
    lowercase__ = sum(p.numel() for p in model.parameters() )

    lowercase__ = datetime.now()
    lowercase__ , lowercase__ , lowercase__ = compute_heads_importance( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , compute_importance=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE , actually_pruned=_SCREAMING_SNAKE_CASE , )
    lowercase__ = 1 / loss
    lowercase__ = datetime.now() - before_time

    logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , pruned_num_params / original_num_params * 100 , )
    logger.info('Pruning: score with masking: %f score with pruning: %f' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 )
    save_model(_SCREAMING_SNAKE_CASE , args.output_dir )


def __UpperCamelCase () -> Union[str, Any]:
    lowercase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument( '--data_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
    parser.add_argument( '--model_name_or_path' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models' , )
    parser.add_argument( '--output_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The output directory where the model predictions and checkpoints will be written.' , )

    # Other parameters
    parser.add_argument( '--config_name' , default='' , type=_SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name_or_path' , )
    parser.add_argument( '--tokenizer_name' , default='' , type=_SCREAMING_SNAKE_CASE , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
    parser.add_argument( '--cache_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help='Where do you want to store the pre-trained models downloaded from s3' , )
    parser.add_argument( '--data_subset' , type=_SCREAMING_SNAKE_CASE , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
    parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
    parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
    parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
    parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
    parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
    parser.add_argument( '--masking_threshold' , default=0.9 , type=_SCREAMING_SNAKE_CASE , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
    parser.add_argument( '--masking_amount' , default=0.1 , type=_SCREAMING_SNAKE_CASE , help='Amount to heads to masking at each masking step.' )
    parser.add_argument('--metric_name' , default='acc' , type=_SCREAMING_SNAKE_CASE , help='Metric to use for head masking.' )
    parser.add_argument( '--max_seq_length' , default=128 , type=_SCREAMING_SNAKE_CASE , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' ) , )
    parser.add_argument('--batch_size' , default=1 , type=_SCREAMING_SNAKE_CASE , help='Batch size.' )
    parser.add_argument('--seed' , type=_SCREAMING_SNAKE_CASE , default=42 )
    parser.add_argument('--local_rank' , type=_SCREAMING_SNAKE_CASE , default=-1 , help='local_rank for distributed training on gpus' )
    parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
    parser.add_argument('--server_ip' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' )
    lowercase__ = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_SCREAMING_SNAKE_CASE )
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        lowercase__ = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
        lowercase__ = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        lowercase__ = torch.device('cuda' , args.local_rank )
        lowercase__ = 1
        torch.distributed.init_process_group(backend='nccl' )  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )

    lowercase__ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )

    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        lowercase__ = nn.parallel.DistributedDataParallel( _SCREAMING_SNAKE_CASE , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_SCREAMING_SNAKE_CASE )
    elif args.n_gpu > 1:
        lowercase__ = nn.DataParallel(_SCREAMING_SNAKE_CASE )

    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=_SCREAMING_SNAKE_CASE )
    torch.save(_SCREAMING_SNAKE_CASE , os.path.join(args.output_dir , 'run_args.bin' ) )
    logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE )

    # Prepare dataset
    lowercase__ = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] )
    lowercase__ = (torch.from_numpy(_SCREAMING_SNAKE_CASE ),)
    lowercase__ = TensorDataset(*_SCREAMING_SNAKE_CASE )
    lowercase__ = RandomSampler(_SCREAMING_SNAKE_CASE )
    lowercase__ = DataLoader(_SCREAMING_SNAKE_CASE , sampler=_SCREAMING_SNAKE_CASE , batch_size=args.batch_size )

    # Compute head entropy and importance score
    compute_heads_importance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        lowercase__ = mask_heads(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        prune_heads(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    main()
45
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int:
    lowercase__ = [1] * (length + 1)

    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f'''{solution() = }''')
45
1
import requests


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
    lowercase__ = {'Content-Type': 'application/json'}
    lowercase__ = requests.post(_SCREAMING_SNAKE_CASE , json={'text': message_body} , headers=_SCREAMING_SNAKE_CASE )

    if response.status_code != 200:
        lowercase__ = ( 'Request to slack returned an error ' F"""{response.status_code}, the response is:\n{response.text}""" )
        raise ValueError(_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
45
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, )
from transformers.utils import logging


logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)

lowercase_ = {
    """b0""": efficientnet.EfficientNetBa,
    """b1""": efficientnet.EfficientNetBa,
    """b2""": efficientnet.EfficientNetBa,
    """b3""": efficientnet.EfficientNetBa,
    """b4""": efficientnet.EfficientNetBa,
    """b5""": efficientnet.EfficientNetBa,
    """b6""": efficientnet.EfficientNetBa,
    """b7""": efficientnet.EfficientNetBa,
}

lowercase_ = {
    """b0""": {"""hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 224, """dropout_rate""": 0.2, """dw_padding""": []},
    """b1""": {"""hidden_dim""": 1_280, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 240, """dropout_rate""": 0.2, """dw_padding""": [16]},
    """b2""": {"""hidden_dim""": 1_408, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 260, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 16]},
    """b3""": {"""hidden_dim""": 1_536, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 300, """dropout_rate""": 0.3, """dw_padding""": [5, 18]},
    """b4""": {"""hidden_dim""": 1_792, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 380, """dropout_rate""": 0.4, """dw_padding""": [6]},
    """b5""": {"""hidden_dim""": 2_048, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 456, """dropout_rate""": 0.4, """dw_padding""": [13, 27]},
    """b6""": {"""hidden_dim""": 2_304, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 528, """dropout_rate""": 0.5, """dw_padding""": [31]},
    """b7""": {"""hidden_dim""": 2_560, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 600, """dropout_rate""": 0.5, """dw_padding""": [18]},
}


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
    lowercase__ = EfficientNetConfig()
    lowercase__ = CONFIG_MAP[model_name]['hidden_dim']
    lowercase__ = CONFIG_MAP[model_name]['width_coef']
    lowercase__ = CONFIG_MAP[model_name]['depth_coef']
    lowercase__ = CONFIG_MAP[model_name]['image_size']
    lowercase__ = CONFIG_MAP[model_name]['dropout_rate']
    lowercase__ = CONFIG_MAP[model_name]['dw_padding']

    lowercase__ = 'huggingface/label-files'
    lowercase__ = 'imagenet-1k-id2label.json'
    lowercase__ = 1000
    lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
    lowercase__ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}

    lowercase__ = idalabel
    lowercase__ = {v: k for k, v in idalabel.items()}
    return config


def __UpperCamelCase () -> Tuple:
    lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
    return im


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
    lowercase__ = CONFIG_MAP[model_name]['image_size']
    lowercase__ = EfficientNetImageProcessor( size={'height': size, 'width': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=_SCREAMING_SNAKE_CASE , )
    return preprocessor


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple:
    lowercase__ = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
    lowercase__ = sorted(set(_SCREAMING_SNAKE_CASE ) )
    lowercase__ = len(_SCREAMING_SNAKE_CASE )
    lowercase__ = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE , range(_SCREAMING_SNAKE_CASE ) )}

    lowercase__ = []
    rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
    rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
    rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
    rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
    rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )

    for b in block_names:
        lowercase__ = block_name_mapping[b]
        rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
        rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
        rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
        rename_keys.append( (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
        rename_keys.append( (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
        rename_keys.append( (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
        rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
        rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
        rename_keys.append( (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
        rename_keys.append( (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
        rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
        rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
        rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
        rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
        rename_keys.append( (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
        rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
        rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
        rename_keys.append( (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
        rename_keys.append( (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )

    rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
    rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
    rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
    rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
    rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )

    lowercase__ = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            lowercase__ = 'efficientnet.' + item[1]

    lowercase__ = 'classifier.weight'
    lowercase__ = 'classifier.bias'
    return key_mapping


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        lowercase__ = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            lowercase__ = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) )
        else:
            lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE )

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE )


@torch.no_grad()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
    lowercase__ = model_classes[model_name]( include_top=_SCREAMING_SNAKE_CASE , weights='imagenet' , input_tensor=_SCREAMING_SNAKE_CASE , input_shape=_SCREAMING_SNAKE_CASE , pooling=_SCREAMING_SNAKE_CASE , classes=1000 , classifier_activation='softmax' , )

    lowercase__ = original_model.trainable_variables
    lowercase__ = original_model.non_trainable_variables
    lowercase__ = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        lowercase__ = param.numpy()
    lowercase__ = list(tf_params.keys() )

    # Load HuggingFace model
    lowercase__ = get_efficientnet_config(_SCREAMING_SNAKE_CASE )
    lowercase__ = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
    lowercase__ = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print('Converting parameters...' )
    lowercase__ = rename_keys(_SCREAMING_SNAKE_CASE )
    replace_params(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    # Initialize preprocessor and preprocess input image
    lowercase__ = convert_image_processor(_SCREAMING_SNAKE_CASE )
    lowercase__ = preprocessor(images=prepare_img() , return_tensors='pt' )

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        lowercase__ = hf_model(**_SCREAMING_SNAKE_CASE )
    lowercase__ = outputs.logits.detach().numpy()

    # Original model inference
    lowercase__ = False
    lowercase__ = CONFIG_MAP[model_name]['image_size']
    lowercase__ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    lowercase__ = image.img_to_array(_SCREAMING_SNAKE_CASE )
    lowercase__ = np.expand_dims(_SCREAMING_SNAKE_CASE , axis=0 )
    lowercase__ = original_model.predict(_SCREAMING_SNAKE_CASE )

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same."
    print('Model outputs match!' )

    if save_model:
        # Create folder to save model
        if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
            os.mkdir(_SCREAMING_SNAKE_CASE )
        # Save converted model and image processor
        hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
        preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE )

    if push_to_hub:
        # Push model and image processor to hub
        print(F"""Pushing converted {model_name} to the hub...""" )
        lowercase__ = F"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE )
        hf_model.push_to_hub(_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", )
    parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", )
    parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")

    lowercase_ = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
45
1
import torch


def __UpperCamelCase () -> Dict:
    if torch.cuda.is_available():
        lowercase__ = torch.cuda.device_count()
    else:
        lowercase__ = 0
    print(F"""Successfully ran on {num_gpus} GPUs""" )


if __name__ == "__main__":
    main()
45
import argparse
import json
import subprocess


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
    lowercase__ = []

    lowercase__ = ( F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\"""" ' https://api.github.com/repos/huggingface/transformers/actions/runners' )
    lowercase__ = subprocess.run(_SCREAMING_SNAKE_CASE , shell=_SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE )
    lowercase__ = output.stdout.decode('utf-8' )
    lowercase__ = json.loads(_SCREAMING_SNAKE_CASE )

    lowercase__ = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(_SCREAMING_SNAKE_CASE )

    # save the result so we can report them on Slack
    with open('offline_runners.txt' , 'w' ) as fp:
        fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) )

    if len(_SCREAMING_SNAKE_CASE ) > 0:
        lowercase__ = '\n'.join([x['name'] for x in offline_runners] )
        raise ValueError(F"""The following runners are offline:\n{failed}""" )


if __name__ == "__main__":

    def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
        return values.split(',' )

    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument( """--target_runners""", default=None, type=list_str, required=True, help="""Comma-separated list of runners to check status.""", )
    parser.add_argument( """--token""", default=None, type=str, required=True, help="""A token that has actions:read permission.""" )
    lowercase_ = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
45
1
import argparse

import torch
from torch import nn

from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
    lowercase__ = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', 'decoder.output_projection.weight', '_float_tensor', 'encoder.embed_positions._float_tensor', 'decoder.embed_positions._float_tensor', ]
    for k in ignore_keys:
        state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple:
    lowercase__ = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            lowercase__ = s_dict.pop(_SCREAMING_SNAKE_CASE )
        elif "subsample" in key:
            lowercase__ = s_dict.pop(_SCREAMING_SNAKE_CASE )


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
    lowercase__ , lowercase__ = emb.weight.shape
    lowercase__ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE )
    lowercase__ = emb.weight.data
    return lin_layer


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
    lowercase__ = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
    lowercase__ = mam_aaa['args']
    lowercase__ = mam_aaa['model']
    lowercase__ = state_dict['decoder.output_projection.weight']

    remove_ignore_keys_(_SCREAMING_SNAKE_CASE )
    rename_keys(_SCREAMING_SNAKE_CASE )

    lowercase__ = state_dict['decoder.embed_tokens.weight'].shape[0]

    lowercase__ = args.share_decoder_input_output_embed

    lowercase__ = [int(_SCREAMING_SNAKE_CASE ) for i in args.conv_kernel_sizes.split(',' )]
    lowercase__ = SpeechaTextConfig( vocab_size=_SCREAMING_SNAKE_CASE , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , num_conv_layers=len(_SCREAMING_SNAKE_CASE ) , conv_channels=args.conv_channels , conv_kernel_sizes=_SCREAMING_SNAKE_CASE , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=_SCREAMING_SNAKE_CASE , num_beams=5 , max_length=200 , use_cache=_SCREAMING_SNAKE_CASE , decoder_start_token_id=2 , early_stopping=_SCREAMING_SNAKE_CASE , )

    lowercase__ = SpeechaTextForConditionalGeneration(_SCREAMING_SNAKE_CASE )
    lowercase__ , lowercase__ = model.model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
    if len(_SCREAMING_SNAKE_CASE ) > 0 and not set(_SCREAMING_SNAKE_CASE ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }:
        raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' F""" but all the following weights are missing {missing}""" )

    if tie_embeds:
        lowercase__ = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        lowercase__ = lm_head_weights

    model.save_pretrained(_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    lowercase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    lowercase_ = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
45
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    _UpperCamelCase : Tuple = 'ClapFeatureExtractor'
    _UpperCamelCase : Union[str, Any] = ('RobertaTokenizer', 'RobertaTokenizerFast')

    def __init__( self : List[Any] , a : int , a : str )-> Any:
        """simple docstring"""
        super().__init__(a , a )

    def __call__( self : Any , a : Tuple=None , a : Optional[int]=None , a : int=None , **a : Optional[int] )-> Union[str, Any]:
        """simple docstring"""
        lowercase__ = kwargs.pop('sampling_rate' , a )

        if text is None and audios is None:
            raise ValueError('You have to specify either text or audios. Both cannot be none.' )

        if text is not None:
            lowercase__ = self.tokenizer(a , return_tensors=a , **a )

        if audios is not None:
            lowercase__ = self.feature_extractor( a , sampling_rate=a , return_tensors=a , **a )

        if text is not None and audios is not None:
            lowercase__ = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**a ) , tensor_type=a )

    def SCREAMING_SNAKE_CASE_ ( self : str , *a : Dict , **a : int )-> Optional[int]:
        """simple docstring"""
        return self.tokenizer.batch_decode(*a , **a )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , *a : int , **a : Dict )-> Dict:
        """simple docstring"""
        return self.tokenizer.decode(*a , **a )

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
        """simple docstring"""
        lowercase__ = self.tokenizer.model_input_names
        lowercase__ = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
45
1
from __future__ import annotations

from math import gcd


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 2 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 3 , ) -> int | None:
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError('The input value cannot be less than 2' )

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate; it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``.
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
        return (pow(_SCREAMING_SNAKE_CASE , 2 ) + step) % modulus

    for _ in range(_SCREAMING_SNAKE_CASE ):
        # These track the position within the cycle detection logic.
        lowercase__ = seed
        lowercase__ = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            lowercase__ = rand_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            lowercase__ = rand_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            lowercase__ = rand_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            lowercase__ = gcd(hare - tortoise , _SCREAMING_SNAKE_CASE )

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        lowercase__ = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    lowercase_ = argparse.ArgumentParser()
    parser.add_argument(
        """num""",
        type=int,
        help="""The value to find a divisor of""",
    )
    parser.add_argument(
        """--attempts""",
        type=int,
        default=3,
        help="""The number of attempts before giving up""",
    )
    lowercase_ = parser.parse_args()

    lowercase_ = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f'''{args.num} is probably prime''')
    else:
        lowercase_ = args.num // divisor
        print(f'''{args.num} = {divisor} * {quotient}''')
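For a quick sanity check of the factorization logic, here is a compact standalone variant of the tortoise-and-hare loop above; the helper name rho and the test value 8051 (= 83 * 97) are chosen for illustration only and do not come from the file itself.

from math import gcd


def rho(num: int, seed: int = 2, step: int = 1) -> int | None:
    # Floyd cycle detection with f(x) = (x**2 + step) % num, as in the file above.
    tortoise = hare = seed
    while True:
        tortoise = (tortoise * tortoise + step) % num
        hare = (hare * hare + step) % num
        hare = (hare * hare + step) % num
        divisor = gcd(hare - tortoise, num)
        if divisor == 1:
            continue  # No common factor yet; keep walking the cycle.
        return None if divisor == num else divisor


divisor = rho(8_051)
assert divisor is not None and 8_051 % divisor == 0
print(f"8051 = {divisor} * {8_051 // divisor}")  # 8051 = 97 * 83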
45
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    lowercase_ = None

lowercase_ = logging.get_logger(__name__)

lowercase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}

lowercase_ = {
    """vocab_file""": {
        """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
        """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
        """moussaKam/barthez-orangesum-title""": (
            """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
        ),
    },
    """tokenizer_file""": {
        """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
        """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
        """moussaKam/barthez-orangesum-title""": (
            """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
        ),
    },
}

lowercase_ = {
    """moussaKam/mbarthez""": 1_024,
    """moussaKam/barthez""": 1_024,
    """moussaKam/barthez-orangesum-title""": 1_024,
}

lowercase_ = """▁"""


class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
    _UpperCamelCase : Dict = VOCAB_FILES_NAMES
    _UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
    _UpperCamelCase : int = BarthezTokenizer

    def __init__( self : List[Any] , a : Union[str, Any]=None , a : Optional[Any]=None , a : Dict="<s>" , a : Union[str, Any]="</s>" , a : List[str]="</s>" , a : Optional[Any]="<s>" , a : int="<unk>" , a : str="<pad>" , a : Optional[int]="<mask>" , **a : Union[str, Any] , )-> Tuple:
        """simple docstring"""
        lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token

        super().__init__(
            a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , )

        lowercase__ = vocab_file
        lowercase__ = False if not self.vocab_file else True

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
        """simple docstring"""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        lowercase__ = [self.cls_token_id]
        lowercase__ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
        """simple docstring"""
        lowercase__ = [self.sep_token_id]
        lowercase__ = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : Optional[str] = None )-> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )

        if not os.path.isdir(a ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowercase__ = os.path.join(
            a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
            copyfile(self.vocab_file , a )

        return (out_vocab_file,)
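The token layout produced by build_inputs_with_special_tokens above can be checked without loading a checkpoint. In the sketch below the ids 0 and 2 stand in for <s> and </s>; they are placeholder values, not read from a real BARThez vocabulary.

def with_special_tokens(ids_a, ids_b=None, cls_id=0, sep_id=2):
    # Single sequence: <s> A </s>
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    # Sequence pair: <s> A </s></s> B </s>, i.e. a doubled separator between segments.
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]


print(with_special_tokens([10, 11]))            # [0, 10, 11, 2]
print(with_special_tokens([10, 11], [20, 21]))  # [0, 10, 11, 2, 2, 20, 21, 2]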
45
1
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

lowercase_ = TypeVar("""T""")


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
    return (position - 1) // 2


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
    return (2 * position) + 1


def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
    return (2 * position) + 2


class SCREAMING_SNAKE_CASE (Generic[T] ):
    def __init__( self : Optional[Any] )-> None:
        """simple docstring"""
        lowercase__ = []
        lowercase__ = {}
        lowercase__ = 0

    def __len__( self : List[Any] )-> int:
        """simple docstring"""
        return self.elements

    def __repr__( self : str )-> str:
        """simple docstring"""
        return str(self.heap )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> bool:
        """simple docstring"""
        return self.elements == 0

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : T , a : int )-> None:
        """simple docstring"""
        self.heap.append((elem, weight) )
        lowercase__ = self.elements
        self.elements += 1
        self._bubble_up(a )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> T:
        """simple docstring"""
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        lowercase__ , lowercase__ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            lowercase__ , lowercase__ = self.heap[0]
            self._bubble_down(a )
        return elem

    def SCREAMING_SNAKE_CASE_ ( self : Any , a : T , a : int )-> None:
        """simple docstring"""
        lowercase__ = self.position_map[elem]
        lowercase__ = (elem, weight)
        if position > 0:
            lowercase__ = get_parent_position(a )
            lowercase__ , lowercase__ = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(a )
            else:
                self._bubble_down(a )
        else:
            self._bubble_down(a )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : T )-> None:
        """simple docstring"""
        lowercase__ = self.position_map[elem]
        if curr_pos == 0:
            return None
        lowercase__ = get_parent_position(a )
        lowercase__ , lowercase__ = self.heap[curr_pos]
        lowercase__ , lowercase__ = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(a , a )
            return self._bubble_up(a )
        return None

    def SCREAMING_SNAKE_CASE_ ( self : str , a : T )-> None:
        """simple docstring"""
        lowercase__ = self.position_map[elem]
        lowercase__ , lowercase__ = self.heap[curr_pos]
        lowercase__ = get_child_left_position(a )
        lowercase__ = get_child_right_position(a )
        if child_left_position < self.elements and child_right_position < self.elements:
            lowercase__ , lowercase__ = self.heap[child_left_position]
            lowercase__ , lowercase__ = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(a , a )
                return self._bubble_down(a )
        if child_left_position < self.elements:
            lowercase__ , lowercase__ = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(a , a )
                return self._bubble_down(a )
        else:
            return None
        if child_right_position < self.elements:
            lowercase__ , lowercase__ = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(a , a )
                return self._bubble_down(a )
        return None

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : int , a : int )-> None:
        """simple docstring"""
        lowercase__ = self.heap[nodea_pos][0]
        lowercase__ = self.heap[nodea_pos][0]
        lowercase__ , lowercase__ = (
            self.heap[nodea_pos],
            self.heap[nodea_pos],
        )
        lowercase__ = nodea_pos
        lowercase__ = nodea_pos


class SCREAMING_SNAKE_CASE (Generic[T] ):
    def __init__( self : Union[str, Any] )-> None:
        """simple docstring"""
        lowercase__ = {}
        lowercase__ = 0

    def __repr__( self : Dict )-> str:
        """simple docstring"""
        return str(self.connections )

    def __len__( self : Tuple )-> int:
        """simple docstring"""
        return self.nodes

    def SCREAMING_SNAKE_CASE_ ( self : Any , a : T )-> None:
        """simple docstring"""
        if node not in self.connections:
            lowercase__ = {}
            self.nodes += 1

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : T , a : T , a : int )-> None:
        """simple docstring"""
        self.add_node(a )
        self.add_node(a )
        lowercase__ = weight
        lowercase__ = weight


def __UpperCamelCase (_SCREAMING_SNAKE_CASE , ) -> tuple[dict[T, int], dict[T, T | None]]:
    lowercase__ = {node: maxsize for node in graph.connections}
    lowercase__ = {node: None for node in graph.connections}

    lowercase__ = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    lowercase__ = priority_queue.extract_min()
    lowercase__ = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            lowercase__ = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(_SCREAMING_SNAKE_CASE , dist[neighbour] )
            lowercase__ = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        lowercase__ = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                lowercase__ = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(_SCREAMING_SNAKE_CASE , dist[neighbour] )
                lowercase__ = node
    return dist, parent
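As a cross-check for the indexed-heap implementation above, the sketch below reproduces the same dist/parent outputs with the standard-library heapq. Note that the loop above relaxes with dist[node] + weight (Dijkstra-style); textbook Prim would use the edge weight alone, so this sketch mirrors the file rather than the textbook. The function name prim_like and the graph literal are illustrative only.

import heapq


def prim_like(adj, start):
    dist = {v: float("inf") for v in adj}
    parent = {v: None for v in adj}
    dist[start] = 0
    visited = set()
    heap = [(0, start)]
    while heap:
        _, node = heapq.heappop(heap)
        if node in visited:
            continue
        visited.add(node)
        for neighbour, weight in adj[node].items():
            # Mirrors the relaxation used in the file above: dist[node] + weight.
            if neighbour not in visited and dist[node] + weight < dist[neighbour]:
                dist[neighbour] = dist[node] + weight
                parent[neighbour] = node
                heapq.heappush(heap, (dist[neighbour], neighbour))
    return dist, parent


adj = {"a": {"b": 3, "c": 6}, "b": {"a": 3, "c": 2}, "c": {"a": 6, "b": 2}}
print(prim_like(adj, "a"))  # ({'a': 0, 'b': 3, 'c': 5}, {'a': None, 'b': 'a', 'c': 'b'})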
45
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    _UpperCamelCase : List[Any] = StableDiffusionSAGPipeline
    _UpperCamelCase : str = TEXT_TO_IMAGE_PARAMS
    _UpperCamelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
    _UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
    _UpperCamelCase : Union[str, Any] = False

    def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
        """simple docstring"""
        torch.manual_seed(0 )
        lowercase__ = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        lowercase__ = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
        torch.manual_seed(0 )
        lowercase__ = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        lowercase__ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        lowercase__ = CLIPTextModel(a )
        lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )

        lowercase__ = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[int] , a : Any=0 )-> Union[str, Any]:
        """simple docstring"""
        if str(a ).startswith('mps' ):
            lowercase__ = torch.manual_seed(a )
        else:
            lowercase__ = torch.Generator(device=a ).manual_seed(a )
        lowercase__ = {
            'prompt': '.',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 1.0,
            'sag_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs

    def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )


@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    def SCREAMING_SNAKE_CASE_ ( self : str )-> int:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
        """simple docstring"""
        lowercase__ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
        lowercase__ = sag_pipe.to(a )
        sag_pipe.set_progress_bar_config(disable=a )

        lowercase__ = '.'
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = sag_pipe(
            [prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )

        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2

    def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
        """simple docstring"""
        lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        lowercase__ = sag_pipe.to(a )
        sag_pipe.set_progress_bar_config(disable=a )

        lowercase__ = '.'
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = sag_pipe(
            [prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )

        lowercase__ = output.images
        lowercase__ = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        lowercase__ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]:
        """simple docstring"""
        lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
        lowercase__ = sag_pipe.to(a )
        sag_pipe.set_progress_bar_config(disable=a )

        lowercase__ = '.'
        lowercase__ = torch.manual_seed(0 )
        lowercase__ = sag_pipe(
            [prompt] , width=768 , height=512 , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )

        lowercase__ = output.images
        assert image.shape == (1, 512, 768, 3)
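The slow tests above all follow the same fixed-seed regression pattern: seed the generator, run the pipeline once, and compare a small corner slice of the output against stored reference values. A numpy-only distillation of that pattern is sketched below; the fake_pipeline function and its reference slice are stand-ins, not real model outputs.

import numpy as np


def fake_pipeline(rng: np.random.Generator) -> np.ndarray:
    # Stand-in for a pipeline call that returns a (batch, h, w, channels) image.
    return rng.random((1, 8, 8, 3))


# Two runs with the same seed must agree on the monitored slice.
reference_slice = fake_pipeline(np.random.default_rng(0))[0, -3:, -3:, -1]
image = fake_pipeline(np.random.default_rng(0))
image_slice = image[0, -3:, -3:, -1]

assert image.shape == (1, 8, 8, 3)
assert np.abs(image_slice.flatten() - reference_slice.flatten()).max() < 5e-2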
45
1