Dataset columns:

column                     type      range / lengths
code                       string    82 – 54.1k characters
code_codestyle             int64     0 – 699
style_context              string    111 – 35.6k characters
style_context_codestyle    int64     0 – 699
label                      int64     0 – 1
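A minimal sketch of loading rows like the ones previewed below, assuming the data is hosted on the Hugging Face Hub; the dataset id "user/code-style-pairs" is a placeholder, not the real repository name:

    # Hypothetical dataset id; substitute the actual Hub repository.
    from datasets import load_dataset

    ds = load_dataset("user/code-style-pairs", split="train")
    row = ds[0]
    # Integer style ids (0-699) and the 0/1 label from the schema above.
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    # The code fields are full Python source files.
    print(row["code"][:200])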
Row 1 — code:

import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin

if is_torch_available():
    __lowerCAmelCase = "pt"
elif is_tf_available():
    __lowerCAmelCase = "tf"
else:
    __lowerCAmelCase = "jax"

class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase):
    __SCREAMING_SNAKE_CASE : Tuple = ByTaTokenizer
    __SCREAMING_SNAKE_CASE : Tuple = False

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        super().setUp()
        _UpperCAmelCase = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def UpperCAmelCase__ ( self : Any ):
        return ByTaTokenizer.from_pretrained("google/byt5-small" )

    def UpperCAmelCase__ ( self : Any , **__UpperCamelCase : str ):
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **__UpperCamelCase )

    def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : str=False , __UpperCamelCase : Dict=20 , __UpperCamelCase : int=5 ):
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        _UpperCAmelCase = []
        for i in range(len(__UpperCamelCase ) ):
            try:
                _UpperCAmelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=__UpperCamelCase )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        _UpperCAmelCase = list(filter(lambda __UpperCamelCase : re.match(r"^[ a-zA-Z]+$" , t[1] ) , __UpperCamelCase ) )
        _UpperCAmelCase = list(filter(lambda __UpperCamelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__UpperCamelCase ) , __UpperCamelCase ) )
        if max_length is not None and len(__UpperCamelCase ) > max_length:
            _UpperCAmelCase = toks[:max_length]
        if min_length is not None and len(__UpperCamelCase ) < min_length and len(__UpperCamelCase ) > 0:
            while len(__UpperCamelCase ) < min_length:
                _UpperCAmelCase = toks + toks
        # toks_str = [t[1] for t in toks]
        _UpperCAmelCase = [t[0] for t in toks]
        # Ensure consistency
        _UpperCAmelCase = tokenizer.decode(__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase )
        if " " not in output_txt and len(__UpperCamelCase ) > 1:
            _UpperCAmelCase = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__UpperCamelCase )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__UpperCamelCase )
            )
        if with_prefix_space:
            _UpperCAmelCase = " " + output_txt
        _UpperCAmelCase = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
        return output_txt, output_ids

    def UpperCAmelCase__ ( self : Optional[int] ):
        _UpperCAmelCase = self.ta_base_tokenizer
        _UpperCAmelCase = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"] )
        _UpperCAmelCase = tokenizer(["hi", "I went to the gym", ""] )
        self.assertListEqual(batch_with_eos_added["input_ids"] , batch_without_eos_added["input_ids"] )

    def UpperCAmelCase__ ( self : Dict ):
        _UpperCAmelCase = self.ta_base_tokenizer
        _UpperCAmelCase = "Unicode €."
        _UpperCAmelCase = tokenizer(__UpperCamelCase )
        _UpperCAmelCase = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"] , __UpperCamelCase )
        # decoding
        _UpperCAmelCase = tokenizer.decode(__UpperCamelCase )
        self.assertEqual(__UpperCamelCase , "Unicode €.</s>" )
        _UpperCAmelCase = tokenizer("e è é ê ë" )
        _UpperCAmelCase = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"] , __UpperCamelCase )
        # decoding
        _UpperCAmelCase = tokenizer.decode(__UpperCamelCase )
        self.assertEqual(__UpperCamelCase , "e è é ê ë</s>" )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "e è é ê ë</s>" )

    def UpperCAmelCase__ ( self : Tuple ):
        _UpperCAmelCase = self.ta_base_tokenizer
        _UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        _UpperCAmelCase = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        _UpperCAmelCase = tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors=__UpperCamelCase )
        self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
        if FRAMEWORK != "jax":
            _UpperCAmelCase = list(batch.input_ids.numpy()[0] )
        else:
            _UpperCAmelCase = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
        self.assertEqual((2, 37) , batch.input_ids.shape )
        self.assertEqual((2, 37) , batch.attention_mask.shape )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        _UpperCAmelCase = self.ta_base_tokenizer
        _UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        _UpperCAmelCase = tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors=__UpperCamelCase )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids" , __UpperCamelCase )
        self.assertIn("attention_mask" , __UpperCamelCase )
        self.assertNotIn("decoder_input_ids" , __UpperCamelCase )
        self.assertNotIn("decoder_attention_mask" , __UpperCamelCase )

    def UpperCAmelCase__ ( self : int ):
        _UpperCAmelCase = self.ta_base_tokenizer
        _UpperCAmelCase = [
            "Summary of the text.",
            "Another summary.",
        ]
        _UpperCAmelCase = tokenizer(
            text_target=__UpperCamelCase , max_length=32 , padding="max_length" , truncation=__UpperCamelCase , return_tensors=__UpperCamelCase )
        self.assertEqual(32 , targets["input_ids"].shape[1] )

    def UpperCAmelCase__ ( self : Any ):
        _UpperCAmelCase = self.ta_base_tokenizer
        _UpperCAmelCase = ["A long paragraph for summarization. </s>"]
        _UpperCAmelCase = ["Summary of the text. </s>"]
        # fmt: off
        _UpperCAmelCase = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        _UpperCAmelCase = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        _UpperCAmelCase = tokenizer(__UpperCamelCase , text_target=__UpperCamelCase )
        self.assertEqual(__UpperCamelCase , batch["input_ids"][0] )
        self.assertEqual(__UpperCamelCase , batch["labels"][0] )

    def UpperCAmelCase__ ( self : Optional[int] ):
        # safety check on max_len default value so we are sure the test works
        _UpperCAmelCase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        _UpperCAmelCase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                _UpperCAmelCase = tempfile.mkdtemp()
                _UpperCAmelCase = " He is very happy, UNwant\u00E9d,running"
                _UpperCAmelCase = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
                tokenizer.save_pretrained(__UpperCamelCase )
                _UpperCAmelCase = tokenizer.__class__.from_pretrained(__UpperCamelCase )
                _UpperCAmelCase = after_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
                self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
                shutil.rmtree(__UpperCamelCase )
        _UpperCAmelCase = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                _UpperCAmelCase = tempfile.mkdtemp()
                _UpperCAmelCase = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"] )
                _UpperCAmelCase = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token" )
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
                _UpperCAmelCase = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
                tokenizer.save_pretrained(__UpperCamelCase )
                _UpperCAmelCase = tokenizer.__class__.from_pretrained(__UpperCamelCase )
                _UpperCAmelCase = after_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
                self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
                self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                _UpperCAmelCase = tokenizer.__class__.from_pretrained(__UpperCamelCase , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(__UpperCamelCase )

    def UpperCAmelCase__ ( self : Optional[int] ):
        _UpperCAmelCase = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(__UpperCamelCase )
                with open(os.path.join(__UpperCamelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
                    _UpperCAmelCase = json.load(__UpperCamelCase )
                with open(os.path.join(__UpperCamelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
                    _UpperCAmelCase = json.load(__UpperCamelCase )
                _UpperCAmelCase = [F'''<extra_id_{i}>''' for i in range(125 )]
                _UpperCAmelCase = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                _UpperCAmelCase = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(__UpperCamelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(__UpperCamelCase , __UpperCamelCase )
                with open(os.path.join(__UpperCamelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
                    json.dump(__UpperCamelCase , __UpperCamelCase )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                _UpperCAmelCase = tokenizer_class.from_pretrained(
                    __UpperCamelCase , )
                self.assertIn(
                    "an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"] ,
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                _UpperCAmelCase = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=__UpperCamelCase )]
                _UpperCAmelCase = tokenizer_class.from_pretrained(
                    __UpperCamelCase , additional_special_tokens=__UpperCamelCase , )
                self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ["a_new_additional_special_token"] ,
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )

    def UpperCAmelCase__ ( self : Optional[int] ):
        _UpperCAmelCase = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(__UpperCamelCase )
                _UpperCAmelCase = tokenizer_class.from_pretrained(__UpperCamelCase )
                self.assertTrue(tokenizer.decode([255] ) == "" )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        pass

    def UpperCAmelCase__ ( self : List[str] ):
        pass

    def UpperCAmelCase__ ( self : List[str] ):
        pass

    def UpperCAmelCase__ ( self : int ):
        pass

    def UpperCAmelCase__ ( self : List[Any] ):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        _UpperCAmelCase = self.get_tokenizers(fast=__UpperCamelCase , do_lower_case=__UpperCamelCase )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                _UpperCAmelCase = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                _UpperCAmelCase = tokenizer.convert_tokens_to_string(__UpperCamelCase )
                self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )

    def UpperCAmelCase__ ( self : List[Any] ):
        _UpperCAmelCase = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                _UpperCAmelCase = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                _UpperCAmelCase = 0
                _UpperCAmelCase = tokenizer.convert_ids_to_tokens(
                    __UpperCamelCase , skip_special_tokens=__UpperCamelCase )
                for attr in attributes_list:
                    setattr(__UpperCamelCase , attr + "_id" , __UpperCamelCase )
                    self.assertEqual(getattr(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase )
                    self.assertEqual(getattr(__UpperCamelCase , attr + "_id" ) , __UpperCamelCase )
                    setattr(__UpperCamelCase , attr + "_id" , __UpperCamelCase )
                    self.assertEqual(getattr(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase )
                    self.assertEqual(getattr(__UpperCamelCase , attr + "_id" ) , __UpperCamelCase )
                setattr(__UpperCamelCase , "additional_special_tokens_ids" , [] )
                self.assertListEqual(getattr(__UpperCamelCase , "additional_special_tokens" ) , [] )
                self.assertListEqual(getattr(__UpperCamelCase , "additional_special_tokens_ids" ) , [] )
                setattr(__UpperCamelCase , "additional_special_tokens_ids" , [token_id_to_test_setters] )
                self.assertListEqual(getattr(__UpperCamelCase , "additional_special_tokens" ) , [token_to_test_setters] )
                self.assertListEqual(getattr(__UpperCamelCase , "additional_special_tokens_ids" ) , [token_id_to_test_setters] )
code_codestyle: 684
style_context:

import requests

__lowerCAmelCase = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="

def __lowerCamelCase ( _lowerCAmelCase ) -> None:
    # fetching a list of articles in json format
    _UpperCAmelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"] , 1 ):
        print(F'''{i}.) {article["title"]}''' )

if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
style_context_codestyle: 684
label: 1
Row 2 — code:

from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

__lowerCAmelCase = TypeVar("T")

def __lowerCamelCase ( _lowerCAmelCase ) -> int:
    return (position - 1) // 2

def __lowerCamelCase ( _lowerCAmelCase ) -> int:
    return (2 * position) + 1

def __lowerCamelCase ( _lowerCAmelCase ) -> int:
    return (2 * position) + 2

class __SCREAMING_SNAKE_CASE ( Generic[T]):
    def __init__( self : int ):
        _UpperCAmelCase = []
        _UpperCAmelCase = {}
        _UpperCAmelCase = 0

    def __len__( self : List[Any] ):
        return self.elements

    def __repr__( self : List[str] ):
        return str(self.heap )

    def UpperCAmelCase__ ( self : str ):
        # Check if the priority queue is empty
        return self.elements == 0

    def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : T , __UpperCamelCase : int ):
        # Add an element with given priority to the queue
        self.heap.append((elem, weight) )
        _UpperCAmelCase = self.elements
        self.elements += 1
        self._bubble_up(__UpperCamelCase )

    def UpperCAmelCase__ ( self : List[str] ):
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        _UpperCAmelCase , _UpperCAmelCase = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            _UpperCAmelCase , _UpperCAmelCase = self.heap[0]
            self._bubble_down(__UpperCamelCase )
        return elem

    def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : T , __UpperCamelCase : int ):
        # Update the weight of the given key
        _UpperCAmelCase = self.position_map[elem]
        _UpperCAmelCase = (elem, weight)
        if position > 0:
            _UpperCAmelCase = get_parent_position(__UpperCamelCase )
            _UpperCAmelCase , _UpperCAmelCase = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(__UpperCamelCase )
            else:
                self._bubble_down(__UpperCamelCase )
        else:
            self._bubble_down(__UpperCamelCase )

    def UpperCAmelCase__ ( self : Any , __UpperCamelCase : T ):
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        _UpperCAmelCase = self.position_map[elem]
        if curr_pos == 0:
            return None
        _UpperCAmelCase = get_parent_position(__UpperCamelCase )
        _UpperCAmelCase , _UpperCAmelCase = self.heap[curr_pos]
        _UpperCAmelCase , _UpperCAmelCase = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(__UpperCamelCase , __UpperCamelCase )
            return self._bubble_up(__UpperCamelCase )
        return None

    def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : T ):
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        _UpperCAmelCase = self.position_map[elem]
        _UpperCAmelCase , _UpperCAmelCase = self.heap[curr_pos]
        _UpperCAmelCase = get_child_left_position(__UpperCamelCase )
        _UpperCAmelCase = get_child_right_position(__UpperCamelCase )
        if child_left_position < self.elements and child_right_position < self.elements:
            _UpperCAmelCase , _UpperCAmelCase = self.heap[child_left_position]
            _UpperCAmelCase , _UpperCAmelCase = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(__UpperCamelCase , __UpperCamelCase )
                return self._bubble_down(__UpperCamelCase )
        if child_left_position < self.elements:
            _UpperCAmelCase , _UpperCAmelCase = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(__UpperCamelCase , __UpperCamelCase )
                return self._bubble_down(__UpperCamelCase )
        else:
            return None
        if child_right_position < self.elements:
            _UpperCAmelCase , _UpperCAmelCase = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(__UpperCamelCase , __UpperCamelCase )
                return self._bubble_down(__UpperCamelCase )
        return None

    def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : int ):
        # Swap the nodes at the given positions
        _UpperCAmelCase = self.heap[nodea_pos][0]
        _UpperCAmelCase = self.heap[nodea_pos][0]
        _UpperCAmelCase , _UpperCAmelCase = (
            self.heap[nodea_pos],
            self.heap[nodea_pos],
        )
        _UpperCAmelCase = nodea_pos
        _UpperCAmelCase = nodea_pos

class __SCREAMING_SNAKE_CASE ( Generic[T]):
    def __init__( self : Optional[Any] ):
        _UpperCAmelCase = {}
        _UpperCAmelCase = 0

    def __repr__( self : int ):
        return str(self.connections )

    def __len__( self : Tuple ):
        return self.nodes

    def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : T ):
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            _UpperCAmelCase = {}
            self.nodes += 1

    def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : T , __UpperCamelCase : T , __UpperCamelCase : int ):
        # Add an edge between 2 nodes in the graph
        self.add_node(__UpperCamelCase )
        self.add_node(__UpperCamelCase )
        _UpperCAmelCase = weight
        _UpperCAmelCase = weight

def __lowerCamelCase ( _lowerCAmelCase , ) -> tuple[dict[T, int], dict[T, T | None]]:
    _UpperCAmelCase = {node: maxsize for node in graph.connections}
    _UpperCAmelCase = {node: None for node in graph.connections}
    _UpperCAmelCase = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(_lowerCAmelCase , _lowerCAmelCase )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    _UpperCAmelCase = priority_queue.extract_min()
    _UpperCAmelCase = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            _UpperCAmelCase = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(_lowerCAmelCase , dist[neighbour] )
            _UpperCAmelCase = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        _UpperCAmelCase = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                _UpperCAmelCase = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(_lowerCAmelCase , dist[neighbour] )
                _UpperCAmelCase = node
    return dist, parent
code_codestyle: 684
style_context:

import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad

class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    def UpperCAmelCase__ ( self : Any ):
        _UpperCAmelCase = 10

    def UpperCAmelCase__ ( self : Optional[int] ):
        _UpperCAmelCase = [1, 2, 3, 4]
        _UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )

    def UpperCAmelCase__ ( self : List[Any] ):
        _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )

    def UpperCAmelCase__ ( self : int ):
        _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        _UpperCAmelCase = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
        self.assertEqual(__UpperCamelCase , [] )

    def UpperCAmelCase__ ( self : str ):
        _UpperCAmelCase = ""
        _UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
        self.assertEqual(__UpperCamelCase , [] )
        self.assertEqual(__UpperCamelCase , [] )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        _UpperCAmelCase = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        _UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase )
        _UpperCAmelCase = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(__UpperCamelCase , __UpperCamelCase )
        _UpperCAmelCase = ["It was the best of times."]
        self.assertEqual(__UpperCamelCase , __UpperCamelCase )

    def UpperCAmelCase__ ( self : Dict ):
        _UpperCAmelCase = torch.tensor([1, 2, 3, 4] )
        _UpperCAmelCase = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(__UpperCamelCase , 0 ).numpy() , expected.numpy() )

    def UpperCAmelCase__ ( self : Optional[Any] ):
        _UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        _UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(__UpperCamelCase , 23 ).numpy() , expected.numpy() )

    def UpperCAmelCase__ ( self : Optional[int] ):
        _UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        _UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(__UpperCamelCase , 1 ).numpy() , expected.numpy() )

    def UpperCAmelCase__ ( self : Dict ):
        _UpperCAmelCase = 101
        _UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        _UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        _UpperCAmelCase = compute_token_type_ids(__UpperCamelCase , __UpperCamelCase )
        np.testing.assert_array_equal(__UpperCamelCase , __UpperCamelCase )
style_context_codestyle: 684
label: 1
Row 3 — code:

import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests

def __lowerCamelCase ( _lowerCAmelCase ) -> Any:
    _UpperCAmelCase = {}
    _UpperCAmelCase = job["started_at"]
    _UpperCAmelCase = job["completed_at"]
    _UpperCAmelCase = date_parser.parse(_lowerCAmelCase )
    _UpperCAmelCase = date_parser.parse(_lowerCAmelCase )
    _UpperCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    _UpperCAmelCase = start
    _UpperCAmelCase = end
    _UpperCAmelCase = duration_in_min
    return job_info

def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=None ) -> str:
    _UpperCAmelCase = None
    if token is not None:
        _UpperCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
    _UpperCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
    _UpperCAmelCase = requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).json()
    _UpperCAmelCase = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} )
        _UpperCAmelCase = math.ceil((result["total_count"] - 100) / 100 )
        for i in range(_lowerCAmelCase ):
            _UpperCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=_lowerCAmelCase ).json()
            job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} )
        return job_time
    except Exception:
        print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
    return {}

if __name__ == "__main__":
    __lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    __lowerCAmelCase = parser.parse_args()
    __lowerCAmelCase = get_job_time(args.workflow_run_id)
    __lowerCAmelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(F'''{k}: {v["duration"]}''')
code_codestyle: 684
style_context:

from __future__ import annotations

from collections import namedtuple

def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> tuple:
    _UpperCAmelCase = namedtuple("result" , "name value" )
    if (voltage, current, power).count(0 ) != 1:
        raise ValueError("Only one argument must be 0" )
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system" )
    elif voltage == 0:
        return result("voltage" , power / current )
    elif current == 0:
        return result("current" , power / voltage )
    elif power == 0:
        return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
    else:
        raise ValueError("Exactly one argument must be 0" )

if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 684
label: 1
Row 4 — code:

from __future__ import annotations

def __lowerCamelCase ( _lowerCAmelCase = 4 ) -> list[list[int]]:
    _UpperCAmelCase = abs(_lowerCAmelCase ) or 4
    return [[1 + x + y * row_size for x in range(_lowerCAmelCase )] for y in range(_lowerCAmelCase )]

def __lowerCamelCase ( _lowerCAmelCase ) -> list[list[int]]:
    return reverse_row(transpose(_lowerCAmelCase ) )
    # OR.. transpose(reverse_column(matrix))

def __lowerCamelCase ( _lowerCAmelCase ) -> list[list[int]]:
    return reverse_row(reverse_column(_lowerCAmelCase ) )
    # OR.. reverse_column(reverse_row(matrix))

def __lowerCamelCase ( _lowerCAmelCase ) -> list[list[int]]:
    return reverse_column(transpose(_lowerCAmelCase ) )
    # OR.. transpose(reverse_row(matrix))

def __lowerCamelCase ( _lowerCAmelCase ) -> list[list[int]]:
    _UpperCAmelCase = [list(_lowerCAmelCase ) for x in zip(*_lowerCAmelCase )]
    return matrix

def __lowerCamelCase ( _lowerCAmelCase ) -> list[list[int]]:
    _UpperCAmelCase = matrix[::-1]
    return matrix

def __lowerCamelCase ( _lowerCAmelCase ) -> list[list[int]]:
    _UpperCAmelCase = [x[::-1] for x in matrix]
    return matrix

def __lowerCamelCase ( _lowerCAmelCase ) -> None:
    for i in matrix:
        print(*_lowerCAmelCase )

if __name__ == "__main__":
    __lowerCAmelCase = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_aa(matrix))
    __lowerCAmelCase = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_aaa(matrix))
    __lowerCAmelCase = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_aaa(matrix))
code_codestyle: 684
style_context: verbatim duplicate of the GitHub Actions job-time script shown as the code field of Row 3.
style_context_codestyle: 684
label: 1
Row 5 — code:

from ...configuration_utils import PretrainedConfig
from ...utils import logging

__lowerCAmelCase = logging.get_logger(__name__)

__lowerCAmelCase = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}

class __SCREAMING_SNAKE_CASE ( lowercase):
    __SCREAMING_SNAKE_CASE : Any = """openai-gpt"""
    __SCREAMING_SNAKE_CASE : Optional[int] = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__( self : Optional[Any] , __UpperCamelCase : int=40_478 , __UpperCamelCase : int=512 , __UpperCamelCase : Any=768 , __UpperCamelCase : List[str]=12 , __UpperCamelCase : List[Any]=12 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : int=0.1 , __UpperCamelCase : str=0.1 , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : Union[str, Any]=1e-5 , __UpperCamelCase : List[str]=0.02 , __UpperCamelCase : int="cls_index" , __UpperCamelCase : int=True , __UpperCamelCase : List[str]=None , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Tuple=0.1 , **__UpperCamelCase : int , ):
        _UpperCAmelCase = vocab_size
        _UpperCAmelCase = n_positions
        _UpperCAmelCase = n_embd
        _UpperCAmelCase = n_layer
        _UpperCAmelCase = n_head
        _UpperCAmelCase = afn
        _UpperCAmelCase = resid_pdrop
        _UpperCAmelCase = embd_pdrop
        _UpperCAmelCase = attn_pdrop
        _UpperCAmelCase = layer_norm_epsilon
        _UpperCAmelCase = initializer_range
        _UpperCAmelCase = summary_type
        _UpperCAmelCase = summary_use_proj
        _UpperCAmelCase = summary_activation
        _UpperCAmelCase = summary_first_dropout
        _UpperCAmelCase = summary_proj_to_labels
        super().__init__(**__UpperCamelCase )
code_codestyle: 684
style_context:

import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel

__lowerCAmelCase = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 4_8_0_0_0,
        "sample_size": 6_5_5_3_6,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 4_8_0_0_0,
        "sample_size": 6_5_5_3_6,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 4_8_0_0_0,
        "sample_size": 1_3_1_0_7_2,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 1_6_0_0_0,
        "sample_size": 6_5_5_3_6,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 1_6_0_0_0,
        "sample_size": 6_5_5_3_6,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 1_6_0_0_0,
        "sample_size": 6_5_5_3_6,
    },
}

def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
    return torch.atana(_lowerCAmelCase , _lowerCAmelCase ) / math.pi * 2

def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]:
    _UpperCAmelCase = torch.sin(t * math.pi / 2 ) ** 2
    _UpperCAmelCase = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(_lowerCAmelCase , _lowerCAmelCase )

class __SCREAMING_SNAKE_CASE ( lowercase):
    pass

class __SCREAMING_SNAKE_CASE ( nn.Module):
    def __init__( self : str , __UpperCamelCase : Optional[int] ):
        super().__init__()
        _UpperCAmelCase = DiffusionAttnUnetaD(__UpperCamelCase , n_attn_layers=4 )
        _UpperCAmelCase = deepcopy(self.diffusion )
        _UpperCAmelCase = torch.quasirandom.SobolEngine(1 , scramble=__UpperCamelCase )

def __lowerCamelCase ( _lowerCAmelCase ) -> int:
    _UpperCAmelCase = MODELS_MAP[model_name]["url"]
    os.system(F'''wget {url} ./''' )
    return F'''./{model_name}.ckpt'''

__lowerCAmelCase = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
__lowerCAmelCase = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
__lowerCAmelCase = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
__lowerCAmelCase = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}
__lowerCAmelCase = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}
__lowerCAmelCase = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}

def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]:
    if name.startswith("skip" ):
        return name.replace("skip" , RES_CONV_MAP["skip"] )
    # name has to be of format main.{digit}
    if not name.startswith("main." ):
        raise ValueError(F'''ResConvBlock error with {name}''' )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )

def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[Any]:
    for key, value in ATTN_MAP.items():
        if name.startswith(_lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
            return name.replace(_lowerCAmelCase , _lowerCAmelCase )
        elif name.startswith(_lowerCAmelCase ):
            return [name.replace(_lowerCAmelCase , _lowerCAmelCase ) for v in value]
    raise ValueError(F'''Attn error with {name}''' )

def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=13 ) -> List[Any]:
    _UpperCAmelCase = input_string
    if string.split("." )[0] == "timestep_embed":
        return string.replace("timestep_embed" , "time_proj" )
    _UpperCAmelCase = 0
    if string.startswith("net.3." ):
        depth += 1
        _UpperCAmelCase = string[6:]
    elif string.startswith("net." ):
        _UpperCAmelCase = string[4:]
    while string.startswith("main.7." ):
        depth += 1
        _UpperCAmelCase = string[7:]
    if string.startswith("main." ):
        _UpperCAmelCase = string[5:]
    # mid block
    if string[:2].isdigit():
        _UpperCAmelCase = string[:2]
        _UpperCAmelCase = string[2:]
    else:
        _UpperCAmelCase = string[0]
        _UpperCAmelCase = string[1:]
    if depth == max_depth:
        _UpperCAmelCase = MID_NUM_TO_LAYER[layer_num]
        _UpperCAmelCase = "mid_block"
    elif depth > 0 and int(_lowerCAmelCase ) < 7:
        _UpperCAmelCase = DOWN_NUM_TO_LAYER[layer_num]
        _UpperCAmelCase = F'''down_blocks.{depth}'''
    elif depth > 0 and int(_lowerCAmelCase ) > 7:
        _UpperCAmelCase = UP_NUM_TO_LAYER[layer_num]
        _UpperCAmelCase = F'''up_blocks.{max_depth - depth - 1}'''
    elif depth == 0:
        _UpperCAmelCase = DEPTH_0_TO_LAYER[layer_num]
        _UpperCAmelCase = F'''up_blocks.{max_depth - 1}''' if int(_lowerCAmelCase ) > 3 else "down_blocks.0"
    if not string_left.startswith("." ):
        raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
    _UpperCAmelCase = string_left[1:]
    if "resnets" in new_layer:
        _UpperCAmelCase = convert_resconv_naming(_lowerCAmelCase )
    elif "attentions" in new_layer:
        _UpperCAmelCase = convert_attn_naming(_lowerCAmelCase )
        _UpperCAmelCase = new_string_left
    if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
        _UpperCAmelCase = prefix + "." + new_layer + "." + string_left
    else:
        _UpperCAmelCase = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string

def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]:
    _UpperCAmelCase = {}
    for k, v in state_dict.items():
        if k.endswith("kernel" ):
            # up- and downsample layers, don't have trainable weights
            continue
        _UpperCAmelCase = rename(_lowerCAmelCase )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
            _UpperCAmelCase = transform_conv_attns(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
        else:
            _UpperCAmelCase = v
    return new_state_dict

def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
    if len(_lowerCAmelCase ) == 1:
        if len(v.shape ) == 3:
            # weight
            _UpperCAmelCase = v[:, :, 0]
        else:
            # bias
            _UpperCAmelCase = v
    else:
        # qkv matrices
        _UpperCAmelCase = v.shape[0]
        _UpperCAmelCase = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                _UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                _UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict

def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple:
    _UpperCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
    _UpperCAmelCase = args.model_path.split("/" )[-1].split("." )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
        _UpperCAmelCase = download(_lowerCAmelCase )
    _UpperCAmelCase = MODELS_MAP[model_name]["sample_rate"]
    _UpperCAmelCase = MODELS_MAP[model_name]["sample_size"]
    _UpperCAmelCase = Object()
    _UpperCAmelCase = sample_size
    _UpperCAmelCase = sample_rate
    _UpperCAmelCase = 0
    _UpperCAmelCase = UNetaDModel(sample_size=_lowerCAmelCase , sample_rate=_lowerCAmelCase )
    _UpperCAmelCase = diffusers_model.state_dict()
    _UpperCAmelCase = DiffusionUncond(_lowerCAmelCase )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=_lowerCAmelCase )["state_dict"] )
    _UpperCAmelCase = orig_model.diffusion_ema.eval()
    _UpperCAmelCase = orig_model.state_dict()
    _UpperCAmelCase = rename_orig_weights(_lowerCAmelCase )
    _UpperCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    _UpperCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(_lowerCAmelCase ) == 0, F'''Problem with {renamed_minus_diffusers}'''
    assert all(k.endswith("kernel" ) for k in list(_lowerCAmelCase ) ), F'''Problem with {diffusers_minus_renamed}'''
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
        if key == "time_proj.weight":
            _UpperCAmelCase = value.squeeze()
        _UpperCAmelCase = value
    diffusers_model.load_state_dict(_lowerCAmelCase )
    _UpperCAmelCase = 100
    _UpperCAmelCase = 33
    _UpperCAmelCase = IPNDMScheduler(num_train_timesteps=_lowerCAmelCase )
    _UpperCAmelCase = torch.manual_seed(_lowerCAmelCase )
    _UpperCAmelCase = torch.randn([1, 2, config.sample_size] , generator=_lowerCAmelCase ).to(_lowerCAmelCase )
    _UpperCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=_lowerCAmelCase )[:-1]
    _UpperCAmelCase = get_crash_schedule(_lowerCAmelCase )
    _UpperCAmelCase = DanceDiffusionPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase )
    _UpperCAmelCase = torch.manual_seed(33 )
    _UpperCAmelCase = pipe(num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase ).audios
    _UpperCAmelCase = sampling.iplms_sample(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {} )
    _UpperCAmelCase = generated.clamp(-1 , 1 )
    _UpperCAmelCase = (generated - audio).abs().sum()
    _UpperCAmelCase = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print("Diff sum" , _lowerCAmelCase )
    print("Diff max" , _lowerCAmelCase )
    assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
    print(F'''Conversion for {model_name} successful!''' )

if __name__ == "__main__":
    __lowerCAmelCase = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    __lowerCAmelCase = parser.parse_args()
    main(args)
style_context_codestyle: 684
label: 1
Row 6 — code:

# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path

__lowerCAmelCase = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))

import dataclasses  # noqa
import io  # noqa
import itertools  # noqa
import json  # noqa
import os  # noqa
import unittest  # noqa
from copy import deepcopy  # noqa

from parameterized import parameterized  # noqa
from transformers import TrainingArguments, is_torch_available  # noqa
from transformers.deepspeed import is_deepspeed_available  # noqa
from transformers.file_utils import WEIGHTS_NAME  # noqa
from transformers.testing_utils import (  # noqa
    CaptureLogger,
    ExtendSysPath,
    TestCasePlus,
    execute_subprocess_async,
    get_gpu_count,
    mockenv_context,
    require_deepspeed,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)
from transformers.trainer_utils import set_seed  # noqa

set_seed(4_2)

__lowerCAmelCase = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

__lowerCAmelCase = "zero2"
__lowerCAmelCase = "zero3"
__lowerCAmelCase = [ZEROa, ZEROa]

def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int:
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    _UpperCAmelCase = parameterized.to_safe_name("_".join(str(_lowerCAmelCase ) for x in param.args ) )
    return F'''{func.__name__}_{param_based_name}'''

# Cartesian-product of zero stages with models to test
__lowerCAmelCase = list(itertools.product(stages, models.keys()))

@slow
@require_deepspeed
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( lowercase):
    @parameterized.expand(__UpperCamelCase , name_func=__UpperCamelCase )
    def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict ):
        self.run_and_check(
            stage=__UpperCamelCase , model=__UpperCamelCase , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , )

    @require_torch_multi_gpu
    @parameterized.expand(__UpperCamelCase , name_func=__UpperCamelCase )
    def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple ):
        self.run_and_check(
            stage=__UpperCamelCase , model=__UpperCamelCase , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , )

    @parameterized.expand(__UpperCamelCase , name_func=__UpperCamelCase )
    def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : str ):
        self.run_and_check(
            stage=__UpperCamelCase , model=__UpperCamelCase , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , )

    @require_torch_multi_gpu
    @parameterized.expand(__UpperCamelCase , name_func=__UpperCamelCase )
    def UpperCAmelCase__ ( self : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any ):
        self.run_and_check(
            stage=__UpperCamelCase , model=__UpperCamelCase , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , )

    def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : int ):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : int = 10 , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , ):
        _UpperCAmelCase = models[model]
        _UpperCAmelCase = self.run_trainer(
            stage=__UpperCamelCase , model_name=__UpperCamelCase , eval_steps=__UpperCamelCase , num_train_epochs=1 , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , )
        self.do_checks(__UpperCamelCase )
        return output_dir

    def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : int = 10 , __UpperCamelCase : int = 1 , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , ):
        _UpperCAmelCase = self.get_auto_remove_tmp_dir("./xxx" , after=__UpperCamelCase )
        _UpperCAmelCase = F'''
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(__UpperCamelCase )}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        '''.split()
        if fpaa:
            args.extend(["--fp16"] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        _UpperCAmelCase = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
        _UpperCAmelCase = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
        _UpperCAmelCase = self.get_launcher(__UpperCamelCase )
        _UpperCAmelCase = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(__UpperCamelCase , env=self.get_env() )
        return output_dir

    def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Dict=False ):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        _UpperCAmelCase = min(2 , get_gpu_count() ) if distributed else 1
        return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
code_codestyle: 684
style_context:

import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test

sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402

__lowerCAmelCase = get_tests_dir("fixtures")

class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    def UpperCAmelCase__ ( self : Dict ):
        # A mock response for an HTTP head request to emulate server down
        _UpperCAmelCase = mock.Mock()
        _UpperCAmelCase = 500
        _UpperCAmelCase = {}
        _UpperCAmelCase = HTTPError
        _UpperCAmelCase = {}
        # Download this model to make sure it's in the cache.
        _UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head:
            _UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
            # This check we did call the fake head request
            mock_head.assert_called()

    def UpperCAmelCase__ ( self : List[Any] ):
        # This test is for deprecated behavior and can be removed in v5
        _UpperCAmelCase = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )

    def UpperCAmelCase__ ( self : Dict ):
        with self.assertRaises(__UpperCamelCase ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
        _UpperCAmelCase = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
        self.assertIsNotNone(__UpperCamelCase )

@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    @classmethod
    def UpperCAmelCase__ ( cls : str ):
        _UpperCAmelCase = TOKEN
        HfFolder.save_token(__UpperCamelCase )

    @classmethod
    def UpperCAmelCase__ ( cls : Optional[Any] ):
        try:
            delete_repo(token=cls._token , repo_id="test-image-processor" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
        except HTTPError:
            pass

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        _UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
        image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
        _UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-image-processor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                __UpperCamelCase , repo_id="test-image-processor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
        _UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        _UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase )
        image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
        _UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                __UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
        _UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
        for k, v in image_processor.__dict__.items():
            self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) )

    def UpperCAmelCase__ ( self : int ):
        CustomImageProcessor.register_for_auto_class()
        _UpperCAmelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase )
        image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
        _UpperCAmelCase = AutoImageProcessor.from_pretrained(
            F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__UpperCamelCase )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
style_context_codestyle: 684
label: 1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCAmelCase__ ( self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = 1 _UpperCAmelCase = 3 _UpperCAmelCase = (32, 32) _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCamelCase ) return image @property def UpperCAmelCase__ ( self : Optional[int] ): torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCamelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def UpperCAmelCase__ ( self : List[Any] ): torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def UpperCAmelCase__ ( self : Union[str, Any] ): torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , ) return CLIPTextModel(__UpperCamelCase ) def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase = self.dummy_cond_unet_upscale _UpperCAmelCase = DDPMScheduler() _UpperCAmelCase = DDIMScheduler(prediction_type="v_prediction" ) _UpperCAmelCase = self.dummy_vae _UpperCAmelCase = self.dummy_text_encoder _UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _UpperCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _UpperCAmelCase = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _UpperCAmelCase = StableDiffusionUpscalePipeline( unet=__UpperCamelCase , low_res_scheduler=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , max_noise_level=350 , ) _UpperCAmelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCAmelCase = "A painting of a squirrel eating a burger" _UpperCAmelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(0 ) _UpperCAmelCase = sd_pipe( [prompt] , image=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) _UpperCAmelCase = 
output.images _UpperCAmelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(0 ) _UpperCAmelCase = sd_pipe( [prompt] , image=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__UpperCamelCase , )[0] _UpperCAmelCase = image[0, -3:, -3:, -1] _UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] _UpperCAmelCase = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _UpperCAmelCase = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase = self.dummy_cond_unet_upscale _UpperCAmelCase = DDPMScheduler() _UpperCAmelCase = DDIMScheduler(prediction_type="v_prediction" ) _UpperCAmelCase = self.dummy_vae _UpperCAmelCase = self.dummy_text_encoder _UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _UpperCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _UpperCAmelCase = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _UpperCAmelCase = StableDiffusionUpscalePipeline( unet=__UpperCamelCase , low_res_scheduler=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , max_noise_level=350 , ) _UpperCAmelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCAmelCase = "A painting of a squirrel eating a burger" _UpperCAmelCase = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) _UpperCAmelCase = output.images assert image.shape[0] == 2 _UpperCAmelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(0 ) _UpperCAmelCase = sd_pipe( [prompt] , image=__UpperCamelCase , generator=__UpperCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) _UpperCAmelCase = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = self.dummy_cond_unet_upscale _UpperCAmelCase = DDPMScheduler() _UpperCAmelCase = DDIMScheduler(prediction_type="v_prediction" ) _UpperCAmelCase = self.dummy_vae _UpperCAmelCase = self.dummy_text_encoder _UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _UpperCAmelCase = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _UpperCAmelCase = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert("RGB" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 _UpperCAmelCase = unet.half() _UpperCAmelCase = text_encoder.half() # make sure here that pndm scheduler skips prk _UpperCAmelCase = StableDiffusionUpscalePipeline( unet=__UpperCamelCase , low_res_scheduler=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , max_noise_level=350 , ) _UpperCAmelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCAmelCase = "A painting of a squirrel eating a burger" 
_UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = sd_pipe( [prompt] , image=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=2 , output_type="np" , ).images _UpperCAmelCase = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCAmelCase__ ( self : Union[str, Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) _UpperCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat.npy" ) _UpperCAmelCase = "stabilityai/stable-diffusion-x4-upscaler" _UpperCAmelCase = StableDiffusionUpscalePipeline.from_pretrained(__UpperCamelCase ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() _UpperCAmelCase = "a cat sitting on a park bench" _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , generator=__UpperCamelCase , output_type="np" , ) _UpperCAmelCase = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1e-3 def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) _UpperCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat_fp16.npy" ) _UpperCAmelCase = "stabilityai/stable-diffusion-x4-upscaler" _UpperCAmelCase = StableDiffusionUpscalePipeline.from_pretrained( __UpperCamelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() _UpperCAmelCase = "a cat sitting on a park bench" _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , generator=__UpperCamelCase , output_type="np" , ) _UpperCAmelCase = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5e-1 def UpperCAmelCase__ ( self : Union[str, Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _UpperCAmelCase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) _UpperCAmelCase = "stabilityai/stable-diffusion-x4-upscaler" _UpperCAmelCase = StableDiffusionUpscalePipeline.from_pretrained( __UpperCamelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _UpperCAmelCase = "a cat sitting on a park bench" _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe( prompt=__UpperCamelCase , image=__UpperCamelCase , generator=__UpperCamelCase , num_inference_steps=5 , output_type="np" , ) _UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
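# A minimal inference sketch distilled from the slow tests above; the model id,
# image URL, call signature, and output shape all come from the test code,
# while the device choice and fp16 dtype are illustrative assumptions.
import torch
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image

pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
)
pipe.to("cuda")
pipe.enable_attention_slicing()

low_res = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/sd2-upscale/low_res_cat.png"
)
generator = torch.manual_seed(0)
image = pipe(
    prompt="a cat sitting on a park bench",
    image=low_res,
    generator=generator,
    output_type="np",
).images[0]
assert image.shape == (512, 512, 3)  # x4 upscale, per the assertions in the tests above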
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    # Run one dict-style operation against `obj`, returning (result, exception).
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
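# A minimal sketch of exercising HashMap directly, mirroring the dict parity
# checked above. Assumes the same `data_structures.hashing.hash_map` module as
# the tests; `initial_block_size` is the only constructor argument used there.
from data_structures.hashing.hash_map import HashMap

hm = HashMap(initial_block_size=4)
hm["key_a"] = "val_a"      # setitem; adding more items may trigger a resize
assert hm["key_a"] == "val_a"
del hm["key_a"]            # delitem, shrinking occupancy like the tests above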
from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}


if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
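# Hedged illustration of what the lazy structure above buys: at runtime the
# module object is a _LazyModule, so submodules such as `.config` are only
# imported when one of their attributes is first accessed.
import transformers.onnx

onnx_config_cls = transformers.onnx.OnnxConfig  # triggers the deferred import
assert onnx_config_cls.__name__ == "OnnxConfig"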
def binary_insertion_sort(collection: list) -> list:
    """Sort `collection` in place, using binary search to locate each
    insertion point instead of a linear scan."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary search for the leftmost position where `val` belongs
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift the larger elements one slot to the right, then insert
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
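# Quick illustrative check of the function above (it sorts in place and also
# returns the list):
data = [5, 2, 9, 1, 5]
assert binary_insertion_sort(data) == [1, 2, 5, 5, 9]
assert data == [1, 2, 5, 5, 9]  # the input list was mutated in place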
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3  # smallest candidate tried by randrange below


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2_048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
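# A hedged sanity check (not part of the original script): by construction,
# e_2 is the modular inverse of e_1**d mod p, so their product is 1 mod p.
# The tiny key size is purely for illustration, never for real use.
public_key, private_key = generate_key(16)
_, e_1, e_2, p = public_key
_, d = private_key
assert (pow(e_1, d, p) * e_2) % p == 1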
alphabet_size = 2_5_6
# Modulus to hash a string
modulus = 1_0_0_0_0_0_3


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
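# Illustrative sketch of the rolling-hash identity used above: dropping the
# leading character of a window and appending the next one reproduces the hash
# computed from scratch. The helper name is purely illustrative.
def _window_hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h


_power = pow(alphabet_size, 2, modulus)  # alphabet_size ** (window_len - 1)
_rolled = ((_window_hash("abc") - ord("a") * _power) * alphabet_size + ord("d")) % modulus
assert _rolled == _window_hash("bcd")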
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = {"vocab_file": "spm_char.model"} __lowerCAmelCase = { "vocab_file": { "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model", "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model", "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model", } } __lowerCAmelCase = { "microsoft/speecht5_asr": 1_0_2_4, "microsoft/speecht5_tts": 1_0_2_4, "microsoft/speecht5_vc": 1_0_2_4, } class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : int = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : str = ["""input_ids""", """attention_mask"""] def __init__( self : Any , __UpperCamelCase : Any , __UpperCamelCase : Tuple="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Any="<unk>" , __UpperCamelCase : Dict="<pad>" , __UpperCamelCase : Optional[Dict[str, Any]] = None , **__UpperCamelCase : Optional[int] , ): _UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , ) _UpperCAmelCase = vocab_file _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCamelCase ) @property def UpperCAmelCase__ ( self : str ): return self.sp_model.get_piece_size() def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Any ): _UpperCAmelCase = self.__dict__.copy() _UpperCAmelCase = None return state def __setstate__( self : Optional[Any] , __UpperCamelCase : List[str] ): _UpperCAmelCase = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _UpperCAmelCase = {} _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase__ ( self : int , __UpperCamelCase : str ): return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase ) def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[Any] ): return self.sp_model.piece_to_id(__UpperCamelCase ) def UpperCAmelCase__ ( self : Any , __UpperCamelCase : Optional[int] ): _UpperCAmelCase = self.sp_model.IdToPiece(__UpperCamelCase ) return token def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Union[str, Any] ): _UpperCAmelCase = [] _UpperCAmelCase = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__UpperCamelCase ) + token _UpperCAmelCase = [] else: current_sub_tokens.append(__UpperCamelCase ) out_string += self.sp_model.decode(__UpperCamelCase ) return out_string.strip() def UpperCAmelCase__ ( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any]=None ): if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API 
consistency return token_ids_a + token_ids_a + [self.eos_token_id] def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase ) _UpperCAmelCase = [1] if token_ids_a is None: return ([0] * len(__UpperCamelCase )) + suffix_ones return ([0] * len(__UpperCamelCase )) + ([0] * len(__UpperCamelCase )) + suffix_ones def UpperCAmelCase__ ( self : int , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ): if not os.path.isdir(__UpperCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _UpperCAmelCase = os.path.join( __UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCamelCase , "wb" ) as fi: _UpperCAmelCase = self.sp_model.serialized_model_proto() fi.write(__UpperCamelCase ) return (out_vocab_file,)
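# Hedged usage sketch of the character-level SentencePiece tokenizer defined
# above (shipped in transformers as SpeechT5Tokenizer); fetching the
# sentencepiece model from the hub requires network access.
from transformers import SpeechT5Tokenizer

tok = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
ids = tok("Hello world").input_ids
assert ids[-1] == tok.eos_token_id  # build_inputs_with_special_tokens appends </s>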
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch __lowerCAmelCase = random.Random() def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]: if rng is None: _UpperCAmelCase = global_rng _UpperCAmelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def __init__( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Union[str, Any]=400 , __UpperCamelCase : List[Any]=2_000 , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : Optional[int]=160 , __UpperCamelCase : Any=8 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Dict=4_000 , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Tuple=True , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = min_seq_length _UpperCAmelCase = max_seq_length _UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _UpperCAmelCase = padding_value _UpperCAmelCase = sampling_rate _UpperCAmelCase = return_attention_mask _UpperCAmelCase = do_normalize _UpperCAmelCase = feature_size _UpperCAmelCase = chunk_length _UpperCAmelCase = hop_length def UpperCAmelCase__ ( self : Optional[Any] ): return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Tuple=False , __UpperCamelCase : Dict=False ): def _flatten(__UpperCamelCase : Any ): return list(itertools.chain(*__UpperCamelCase ) ) if equal_length: _UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _UpperCAmelCase = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : str = WhisperFeatureExtractor if is_speech_available() else None def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = WhisperFeatureExtractionTester(self ) def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = feat_extract_first.save_pretrained(__UpperCamelCase )[0] check_json_file_has_correct_format(__UpperCamelCase ) _UpperCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCamelCase ) _UpperCAmelCase = feat_extract_first.to_dict() _UpperCAmelCase = feat_extract_second.to_dict() _UpperCAmelCase = 
feat_extract_first.mel_filters _UpperCAmelCase = feat_extract_second.mel_filters self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(__UpperCamelCase , "feat_extract.json" ) feat_extract_first.to_json_file(__UpperCamelCase ) _UpperCAmelCase = self.feature_extraction_class.from_json_file(__UpperCamelCase ) _UpperCAmelCase = feat_extract_first.to_dict() _UpperCAmelCase = feat_extract_second.to_dict() _UpperCAmelCase = feat_extract_first.mel_filters _UpperCAmelCase = feat_extract_second.mel_filters self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : int ): # Tests that all call wrap to encode_plus and batch_encode_plus _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs] # Test feature size _UpperCAmelCase = feature_extractor(__UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input _UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) # Test batched _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)] _UpperCAmelCase = np.asarray(__UpperCamelCase ) _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) # Test truncation required _UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs] _UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs] _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs_truncated] _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) def UpperCAmelCase__ ( self : Union[str, Any] ): import torch _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa ) _UpperCAmelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) _UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Tuple ): _UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech _UpperCAmelCase = ds.sort("id" ).select(range(__UpperCamelCase ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def UpperCAmelCase__ ( self : Tuple ): # fmt: off _UpperCAmelCase = torch.tensor( [ 0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951, 0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678, 0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554, -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854 ] ) # fmt: on _UpperCAmelCase = self._load_datasamples(1 ) _UpperCAmelCase = WhisperFeatureExtractor() _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_features self.assertEqual(input_features.shape , (1, 80, 3_000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , __UpperCamelCase , atol=1e-4 ) ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase = self._load_datasamples(1 )[0] _UpperCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue _UpperCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__UpperCamelCase )[0] self.assertTrue(np.all(np.mean(__UpperCamelCase ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase ) - 1 ) < 1e-3 ) )
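# A short sketch of the integration check above: Whisper's feature extractor
# turns a 1-D waveform into a (1, 80, 3000) log-mel tensor, padding to the
# 30-second chunk length by default. The random waveform stands in for real
# speech purely for illustration.
import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()
waveform = np.random.rand(16_000).astype(np.float32)  # ~1 s of fake audio
features = feature_extractor(waveform, return_tensors="np").input_features
assert features.shape == (1, 80, 3_000)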
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence[start:end + 1]` in place with the deliberately
    inefficient slowsort algorithm."""
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
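# Minimal usage sketch: slowsort mutates the list in place and returns None.
seq = [9, 8, 7, 6, 5]
slowsort(seq)
assert seq == [5, 6, 7, 8, 9]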
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file


# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Downloads and caches the prompt from a repo and returns its contents (if necessary)."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
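# Hedged usage sketch: a string containing whitespace is treated as the prompt
# itself and returned verbatim, so no hub lookup happens on this path. The
# agent name is an illustrative placeholder.
inline = download_prompt("Human: <<task>>\n\nAssistant: ", agent_name="my-agent")
assert inline.startswith("Human:")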
import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): __lowerCAmelCase = True from torch.cuda.amp import autocast __lowerCAmelCase = logging.getLogger(__name__) @dataclass class __SCREAMING_SNAKE_CASE : __SCREAMING_SNAKE_CASE : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""}) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) __SCREAMING_SNAKE_CASE : Optional[bool] = field( default=lowercase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""}) __SCREAMING_SNAKE_CASE : Optional[bool] = field( default=lowercase , metadata={"""help""": """Whether to log verbose messages or not."""} , ) __SCREAMING_SNAKE_CASE : Optional[float] = field( default=2.0 , metadata={"""help""": """Maximum temperature for gumbel softmax."""}) __SCREAMING_SNAKE_CASE : Optional[float] = field( default=0.5 , metadata={"""help""": """Minimum temperature for gumbel softmax."""}) __SCREAMING_SNAKE_CASE : Optional[float] = field( default=0.99_99_95 , metadata={"""help""": """Decay of gumbel temperature during training."""}) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) _UpperCAmelCase = logging.WARNING if model_args.verbose_logging: _UpperCAmelCase = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): _UpperCAmelCase = logging.INFO logger.setLevel(_lowerCAmelCase ) @dataclass class __SCREAMING_SNAKE_CASE : __SCREAMING_SNAKE_CASE : str = field( default=lowercase , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""}) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}) __SCREAMING_SNAKE_CASE : Optional[str] = field( default="""train""" , metadata={ """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'""" } , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default="""validation""" , metadata={ """help""": ( """The name of the validation data set split to use (via the datasets library). Defaults to 'validation'""" ) } , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default="""file""" , metadata={"""help""": """Column in the dataset that contains speech file path. 
Defaults to 'file'"""} , ) __SCREAMING_SNAKE_CASE : bool = field( default=lowercase , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""}) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=1 , metadata={ """help""": """The percentage of the train set used as validation set in case there's no validation split""" } , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=lowercase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) __SCREAMING_SNAKE_CASE : Optional[float] = field( default=20.0 , metadata={"""help""": """Filter audio files that are longer than `max_duration_in_seconds` seconds"""}) @dataclass class __SCREAMING_SNAKE_CASE : __SCREAMING_SNAKE_CASE : WavaVecaForPreTraining __SCREAMING_SNAKE_CASE : WavaVecaFeatureExtractor __SCREAMING_SNAKE_CASE : Union[bool, str] = "longest" __SCREAMING_SNAKE_CASE : Optional[int] = None __SCREAMING_SNAKE_CASE : Optional[int] = None def __call__( self : Optional[int] , __UpperCamelCase : List[Dict[str, Union[List[int], torch.Tensor]]] ): # reformat list to dict and set to pytorch format _UpperCAmelCase = self.feature_extractor.pad( __UpperCamelCase , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) _UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] ) _UpperCAmelCase = batch["input_values"].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula _UpperCAmelCase = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to( torch.long ) _UpperCAmelCase = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch["input_values"].device ) # these two operations makes sure that all values # before the output lengths indices are attended to _UpperCAmelCase = 1 _UpperCAmelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices _UpperCAmelCase = _compute_mask_indices( (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=__UpperCamelCase , min_masks=2 , ) return batch class __SCREAMING_SNAKE_CASE ( lowercase): def __init__( self : int , *__UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any]=1 , __UpperCamelCase : int=0 , __UpperCamelCase : str=1.0 , **__UpperCamelCase : Union[str, Any] ): super().__init__(*__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = 0 _UpperCAmelCase = max_gumbel_temp _UpperCAmelCase = min_gumbel_temp _UpperCAmelCase = gumbel_temp_decay def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : nn.Module , __UpperCamelCase : Dict[str, Union[torch.Tensor, Any]] ): model.train() _UpperCAmelCase = self._prepare_inputs(__UpperCamelCase ) if self.use_amp: with autocast(): _UpperCAmelCase = self.compute_loss(__UpperCamelCase , __UpperCamelCase ) else: _UpperCAmelCase = self.compute_loss(__UpperCamelCase , __UpperCamelCase ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": _UpperCAmelCase = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": _UpperCAmelCase = loss.sum() / (inputs["mask_time_indices"]).sum() else: raise ValueError(F'''{model.config.ctc_loss_reduction} is not valid. 
Choose one of [\'mean\', \'sum\']''' ) if self.args.gradient_accumulation_steps > 1: _UpperCAmelCase = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(__UpperCamelCase ).backward() elif self.use_apex: with amp.scale_loss(__UpperCamelCase , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(__UpperCamelCase ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) return loss.detach() def __lowerCamelCase ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() configure_logger(_lowerCAmelCase , _lowerCAmelCase ) # Downloading and loading a dataset from the hub. _UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" _UpperCAmelCase = DatasetDict() _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , ) _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" _UpperCAmelCase = DatasetDict() _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , ) _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=_lowerCAmelCase ) def prepare_dataset(_lowerCAmelCase ): # check that all files have the correct sampling rate _UpperCAmelCase , _UpperCAmelCase = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays _UpperCAmelCase = datasets.map( _lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names ) # filter audio files that are too long _UpperCAmelCase = vectorized_datasets.filter( lambda _lowerCAmelCase : len(data["speech"] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(_lowerCAmelCase ): return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` _UpperCAmelCase = vectorized_datasets.map( _lowerCAmelCase , batched=_lowerCAmelCase , num_proc=data_args.preprocessing_num_workers , 
load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 _UpperCAmelCase = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and" " ``config.feat_extract_norm='layer'" ) _UpperCAmelCase = WavaVecaForPreTraining(_lowerCAmelCase ) _UpperCAmelCase = DataCollatorForWavaVecaPretraining(model=_lowerCAmelCase , feature_extractor=_lowerCAmelCase ) _UpperCAmelCase = WavaVecaPreTrainer( model=_lowerCAmelCase , data_collator=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=_lowerCAmelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
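# Illustrative launch command for the pretraining script above (the script
# filename and dataset choice are placeholders; the flags map onto the
# ModelArguments/DataTrainingArguments fields and standard TrainingArguments):
#
#   python run_wav2vec2_pretraining.py \
#       --model_name_or_path patrickvonplaten/wav2vec2-base-v2 \
#       --dataset_name librispeech_asr \
#       --dataset_config_name clean \
#       --max_duration_in_seconds 20.0 \
#       --output_dir ./wav2vec2-pretrained \
#       --do_train --do_eval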
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the Project Euler 43 substring-divisibility property for one permutation."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
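# Worked illustration (digits taken from the Project Euler 43 statement):
# 1406357289 satisfies every substring-divisibility test above, e.g.
# d4 = 6 is even, d3+d4+d5 = 0+6+3 = 9 is divisible by 3, and d6 = 5 by 5.
digits = (1, 4, 0, 6, 3, 5, 7, 2, 8, 9)
assert is_substring_divisible(digits)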
import re import string import numpy as np import datasets __lowerCAmelCase = "\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n" __lowerCAmelCase = "\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n" __lowerCAmelCase = "\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __SCREAMING_SNAKE_CASE ( datasets.Metric): def UpperCAmelCase__ ( self : Union[str, Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": 
datasets.Value("string" , id="sequence" ), } ) , reference_urls=[] , ) def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Dict=None , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : Union[str, Any]=False , ): if regexes_to_ignore is not None: for s in regexes_to_ignore: _UpperCAmelCase = np.array([re.sub(__UpperCamelCase , "" , __UpperCamelCase ) for x in predictions] ) _UpperCAmelCase = np.array([re.sub(__UpperCamelCase , "" , __UpperCamelCase ) for x in references] ) else: _UpperCAmelCase = np.asarray(__UpperCamelCase ) _UpperCAmelCase = np.asarray(__UpperCamelCase ) if ignore_case: _UpperCAmelCase = np.char.lower(__UpperCamelCase ) _UpperCAmelCase = np.char.lower(__UpperCamelCase ) if ignore_punctuation: _UpperCAmelCase = string.punctuation.maketrans("" , "" , string.punctuation ) _UpperCAmelCase = np.char.translate(__UpperCamelCase , table=__UpperCamelCase ) _UpperCAmelCase = np.char.translate(__UpperCamelCase , table=__UpperCamelCase ) if ignore_numbers: _UpperCAmelCase = string.digits.maketrans("" , "" , string.digits ) _UpperCAmelCase = np.char.translate(__UpperCamelCase , table=__UpperCamelCase ) _UpperCAmelCase = np.char.translate(__UpperCamelCase , table=__UpperCamelCase ) _UpperCAmelCase = predictions == references return {"exact_match": np.mean(__UpperCamelCase ) * 100}
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __lowerCAmelCase = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __lowerCAmelCase = {"facebook/blenderbot-3B": 1_2_8} class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : List[Any] = ["""input_ids""", """attention_mask"""] __SCREAMING_SNAKE_CASE : List[str] = BlenderbotTokenizer def __init__( self : Tuple , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : Tuple="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Dict="</s>" , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[str]=True , **__UpperCamelCase : int , ): super().__init__( __UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , ) _UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space: _UpperCAmelCase = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) ) _UpperCAmelCase = add_prefix_space _UpperCAmelCase = pre_tok_class(**__UpperCamelCase ) _UpperCAmelCase = add_prefix_space _UpperCAmelCase = "post_processor" _UpperCAmelCase = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase ) if tokenizer_component_instance: _UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _UpperCAmelCase = tuple(state["sep"] ) if "cls" in state: _UpperCAmelCase = tuple(state["cls"] ) _UpperCAmelCase = False if state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space: _UpperCAmelCase = add_prefix_space _UpperCAmelCase = True if state.get("trim_offsets" , __UpperCamelCase ) != trim_offsets: _UpperCAmelCase = trim_offsets _UpperCAmelCase = True if changes_to_apply: _UpperCAmelCase = getattr(__UpperCamelCase , state.pop("type" ) ) 
_UpperCAmelCase = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def UpperCAmelCase__ ( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ): _UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value _UpperCAmelCase = value def UpperCAmelCase__ ( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] ): _UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase ) def UpperCAmelCase__ ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ): _UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase ) def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ): _UpperCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ): _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : "Conversation" ): _UpperCAmelCase = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) _UpperCAmelCase = " ".join(__UpperCamelCase ) _UpperCAmelCase = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: _UpperCAmelCase = input_ids[-self.model_max_length :] logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
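# Hedged sketch of the fast tokenizer above (BlenderbotTokenizerFast in
# transformers): per build_inputs_with_special_tokens, a single </s> is
# appended and no bos token is added.
from transformers import BlenderbotTokenizerFast

tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
ids = tok(" Hello").input_ids
assert ids[-1] == tok.eos_token_id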
import argparse import logging import os import re import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForLanguageModeling, PushToHubCallback, TFAutoModelForMaskedLM, create_optimizer, ) __lowerCAmelCase = logging.getLogger(__name__) __lowerCAmelCase = tf.data.AUTOTUNE def __lowerCamelCase ( ) -> str: _UpperCAmelCase = argparse.ArgumentParser(description="Train a masked language model on TPU." ) parser.add_argument( "--pretrained_model_config" , type=_lowerCAmelCase , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , ) parser.add_argument( "--tokenizer" , type=_lowerCAmelCase , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , ) parser.add_argument( "--per_replica_batch_size" , type=_lowerCAmelCase , default=8 , help="Batch size per TPU core." , ) parser.add_argument( "--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , ) parser.add_argument( "--tpu_name" , type=_lowerCAmelCase , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , ) parser.add_argument( "--tpu_zone" , type=_lowerCAmelCase , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , ) parser.add_argument( "--gcp_project" , type=_lowerCAmelCase , help="Google cloud project name. Only used for non-Colab TPU nodes." ) parser.add_argument( "--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , ) parser.add_argument( "--train_dataset" , type=_lowerCAmelCase , help="Path to training dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket." , ) parser.add_argument( "--shuffle_buffer_size" , type=_lowerCAmelCase , default=2**18 , help="Size of the shuffle buffer (in samples)" , ) parser.add_argument( "--eval_dataset" , type=_lowerCAmelCase , help="Path to evaluation dataset to load. If the path begins with `gs://`" " then the dataset will be loaded from a Google Cloud Storage bucket." , ) parser.add_argument( "--num_epochs" , type=_lowerCAmelCase , default=1 , help="Number of epochs to train for." , ) parser.add_argument( "--learning_rate" , type=_lowerCAmelCase , default=1E-4 , help="Learning rate to use for training." , ) parser.add_argument( "--weight_decay_rate" , type=_lowerCAmelCase , default=1E-3 , help="Weight decay rate to use for training." , ) parser.add_argument( "--max_length" , type=_lowerCAmelCase , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , ) parser.add_argument( "--mlm_probability" , type=_lowerCAmelCase , default=0.15 , help="Fraction of tokens to mask during training." , ) parser.add_argument("--output_dir" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="Path to save model checkpoints to." ) parser.add_argument("--hub_model_id" , type=_lowerCAmelCase , help="Model ID to upload to on the Hugging Face Hub." 
) _UpperCAmelCase = parser.parse_args() return args def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[Any]: try: if args.tpu_name: _UpperCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver( args.tpu_name , zone=args.tpu_zone , project=args.gcp_project ) else: _UpperCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: raise RuntimeError( "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or " "--gcp_project. When running on a TPU VM, use --tpu_name local." ) tf.config.experimental_connect_to_cluster(_lowerCAmelCase ) tf.tpu.experimental.initialize_tpu_system(_lowerCAmelCase ) return tpu def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[Any]: _UpperCAmelCase = 0 for file in file_list: _UpperCAmelCase = file.split("/" )[-1] _UpperCAmelCase = re.search(r"-\d+-(\d+)\.tfrecord" , _lowerCAmelCase ).group(1 ) _UpperCAmelCase = int(_lowerCAmelCase ) num_samples += sample_count return num_samples def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ) -> Optional[Any]: _UpperCAmelCase = count_samples(_lowerCAmelCase ) _UpperCAmelCase = tf.data.Dataset.from_tensor_slices(_lowerCAmelCase ) if shuffle: _UpperCAmelCase = dataset.shuffle(len(_lowerCAmelCase ) ) _UpperCAmelCase = tf.data.TFRecordDataset(_lowerCAmelCase , num_parallel_reads=_lowerCAmelCase ) # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here _UpperCAmelCase = dataset.apply(tf.data.experimental.assert_cardinality(_lowerCAmelCase ) ) _UpperCAmelCase = dataset.map(_lowerCAmelCase , num_parallel_calls=_lowerCAmelCase ) if shuffle: assert shuffle_buffer_size is not None _UpperCAmelCase = dataset.shuffle(args.shuffle_buffer_size ) _UpperCAmelCase = dataset.batch(_lowerCAmelCase , drop_remainder=_lowerCAmelCase ) _UpperCAmelCase = dataset.map(_lowerCAmelCase , num_parallel_calls=_lowerCAmelCase ) _UpperCAmelCase = dataset.prefetch(_lowerCAmelCase ) return dataset def __lowerCamelCase ( _lowerCAmelCase ) -> Dict: if not args.no_tpu: _UpperCAmelCase = initialize_tpu(_lowerCAmelCase ) _UpperCAmelCase = tf.distribute.TPUStrategy(_lowerCAmelCase ) else: _UpperCAmelCase = tf.distribute.OneDeviceStrategy(device="/gpu:0" ) if args.bfloataa: tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" ) _UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer ) _UpperCAmelCase = AutoConfig.from_pretrained(args.pretrained_model_config ) _UpperCAmelCase = tokenizer.vocab_size _UpperCAmelCase = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) ) if not training_records: raise ValueError(F'''No .tfrecord files found in {args.train_dataset}.''' ) _UpperCAmelCase = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) ) if not eval_records: raise ValueError(F'''No .tfrecord files found in {args.eval_dataset}.''' ) _UpperCAmelCase = count_samples(_lowerCAmelCase ) _UpperCAmelCase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync) _UpperCAmelCase = steps_per_epoch * args.num_epochs with strategy.scope(): _UpperCAmelCase = TFAutoModelForMaskedLM.from_config(_lowerCAmelCase ) model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built _UpperCAmelCase , _UpperCAmelCase = create_optimizer( num_train_steps=_lowerCAmelCase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , ) # 
Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=_lowerCAmelCase , metrics=["accuracy"] ) def decode_fn(_lowerCAmelCase ): _UpperCAmelCase = { "input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), "attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ), } return tf.io.parse_single_example(_lowerCAmelCase , _lowerCAmelCase ) # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can # use their methods in our data pipeline. _UpperCAmelCase = DataCollatorForLanguageModeling( tokenizer=_lowerCAmelCase , mlm_probability=args.mlm_probability , mlm=_lowerCAmelCase , return_tensors="tf" ) def mask_with_collator(_lowerCAmelCase ): # TF really needs an isin() function _UpperCAmelCase = ( ~tf.cast(batch["attention_mask"] , tf.bool ) | (batch["input_ids"] == tokenizer.cls_token_id) | (batch["input_ids"] == tokenizer.sep_token_id) ) _UpperCAmelCase , _UpperCAmelCase = data_collator.tf_mask_tokens( batch["input_ids"] , vocab_size=len(_lowerCAmelCase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_lowerCAmelCase , ) return batch _UpperCAmelCase = args.per_replica_batch_size * strategy.num_replicas_in_sync _UpperCAmelCase = prepare_dataset( _lowerCAmelCase , decode_fn=_lowerCAmelCase , mask_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase , shuffle=_lowerCAmelCase , shuffle_buffer_size=args.shuffle_buffer_size , ) _UpperCAmelCase = prepare_dataset( _lowerCAmelCase , decode_fn=_lowerCAmelCase , mask_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase , shuffle=_lowerCAmelCase , ) _UpperCAmelCase = [] if args.hub_model_id: callbacks.append( PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_lowerCAmelCase ) ) model.fit( _lowerCAmelCase , validation_data=_lowerCAmelCase , epochs=args.num_epochs , callbacks=_lowerCAmelCase , ) model.save_pretrained(args.output_dir ) if __name__ == "__main__": __lowerCAmelCase = parse_args() main(args)
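# A self-contained sketch of the shard-name convention that the count_samples() helper above
# relies on: its regex expects files named like `<name>-<shard>-<num_samples>.tfrecord`, where
# the trailing integer is the per-shard sample count. The filenames below are made up for
# illustration.
import re

shards = ["wikitext-00000-2048.tfrecord", "wikitext-00001-2048.tfrecord", "wikitext-00002-512.tfrecord"]

total = 0
for path in shards:
    sample_count = re.search(r"-\d+-(\d+)\.tfrecord", path.split("/")[-1]).group(1)
    total += int(sample_count)

print(total)  # 4608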
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
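# Hypothetical invocation of the conversion script above; the script filename and all paths
# are placeholders, and facebook/wav2vec2-base stands in for whichever base checkpoint the
# s3prl downstream model was trained from.
#
#   python convert_wav2vec2_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model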
from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json", } class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : Optional[int] = """efficientnet""" def __init__( self : List[Any] , __UpperCamelCase : int = 3 , __UpperCamelCase : int = 600 , __UpperCamelCase : float = 2.0 , __UpperCamelCase : float = 3.1 , __UpperCamelCase : int = 8 , __UpperCamelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __UpperCamelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , __UpperCamelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , __UpperCamelCase : List[int] = [] , __UpperCamelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __UpperCamelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __UpperCamelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __UpperCamelCase : float = 0.25 , __UpperCamelCase : str = "swish" , __UpperCamelCase : int = 2_560 , __UpperCamelCase : str = "mean" , __UpperCamelCase : float = 0.02 , __UpperCamelCase : float = 0.001 , __UpperCamelCase : float = 0.99 , __UpperCamelCase : float = 0.5 , __UpperCamelCase : float = 0.2 , **__UpperCamelCase : List[Any] , ): super().__init__(**__UpperCamelCase ) _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = width_coefficient _UpperCAmelCase = depth_coefficient _UpperCAmelCase = depth_divisor _UpperCAmelCase = kernel_sizes _UpperCAmelCase = in_channels _UpperCAmelCase = out_channels _UpperCAmelCase = depthwise_padding _UpperCAmelCase = strides _UpperCAmelCase = num_block_repeats _UpperCAmelCase = expand_ratios _UpperCAmelCase = squeeze_expansion_ratio _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dim _UpperCAmelCase = pooling_type _UpperCAmelCase = initializer_range _UpperCAmelCase = batch_norm_eps _UpperCAmelCase = batch_norm_momentum _UpperCAmelCase = dropout_rate _UpperCAmelCase = drop_connect_rate _UpperCAmelCase = sum(__UpperCamelCase ) * 4 class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : List[str] = version.parse("""1.11""") @property def UpperCAmelCase__ ( self : Optional[int] ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def UpperCAmelCase__ ( self : Optional[int] ): return 1e-5
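# Minimal usage sketch for the config above, assuming it is exposed as EfficientNetConfig in
# transformers. The defaults in the signature reproduce the google/efficientnet-b7 layout.
from transformers import EfficientNetConfig

config = EfficientNetConfig(image_size=600, width_coefficient=2.0, depth_coefficient=3.1)
print(config.hidden_dim)  # 2560, the projection dimension before pooling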
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
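# A few sanity checks for is_balanced() above.
assert is_balanced("([]{})")
assert is_balanced("")              # an empty sequence is trivially balanced
assert not is_balanced("([)]")      # interleaved brackets do not match on pop
assert not is_balanced("((")        # an unclosed opener leaves the stack non-empty
assert not is_balanced(")(")        # a closer with an empty stack fails immediately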
import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING __lowerCAmelCase = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( enum.Enum): __SCREAMING_SNAKE_CASE : Dict = 0 __SCREAMING_SNAKE_CASE : Union[str, Any] = 1 @add_end_docstrings(lowercase) class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : Optional[int] = """generated""" def __init__( self : List[str] , *__UpperCamelCase : Any , **__UpperCamelCase : Tuple ): super().__init__(*__UpperCamelCase , **__UpperCamelCase ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def UpperCAmelCase__ ( self : str , __UpperCamelCase : List[str]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Dict=None , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Optional[Any]=None , **__UpperCamelCase : Tuple , ): _UpperCAmelCase = {} if truncation is not None: _UpperCAmelCase = truncation _UpperCAmelCase = generate_kwargs _UpperCAmelCase = {} if return_tensors is not None and return_type is None: _UpperCAmelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: _UpperCAmelCase = return_type if clean_up_tokenization_spaces is not None: _UpperCAmelCase = clean_up_tokenization_spaces if stop_sequence is not None: _UpperCAmelCase = self.tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) if len(__UpperCamelCase ) > 1: warnings.warn( "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" " the stop sequence will be used as the stop sequence string in the interim." ) _UpperCAmelCase = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ): return True def UpperCAmelCase__ ( self : Optional[Any] , *__UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] ): _UpperCAmelCase = self.model.config.prefix if self.model.config.prefix is not None else "" if isinstance(args[0] , __UpperCamelCase ): if self.tokenizer.pad_token_id is None: raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" ) _UpperCAmelCase = ([prefix + arg for arg in args[0]],) _UpperCAmelCase = True elif isinstance(args[0] , __UpperCamelCase ): _UpperCAmelCase = (prefix + args[0],) _UpperCAmelCase = False else: raise ValueError( F''' `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`''' ) _UpperCAmelCase = self.tokenizer(*__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self : Optional[Any] , *__UpperCamelCase : int , **__UpperCamelCase : str ): _UpperCAmelCase = super().__call__(*__UpperCamelCase , **__UpperCamelCase ) if ( isinstance(args[0] , __UpperCamelCase ) and all(isinstance(__UpperCamelCase , __UpperCamelCase ) for el in args[0] ) and all(len(__UpperCamelCase ) == 1 for res in result ) ): return [res[0] for res in result] return result def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Any , __UpperCamelCase : Dict=TruncationStrategy.DO_NOT_TRUNCATE , **__UpperCamelCase : Optional[int] ): _UpperCAmelCase = self._parse_and_tokenize(__UpperCamelCase , truncation=__UpperCamelCase , **__UpperCamelCase ) return inputs def UpperCAmelCase__ ( self : int , __UpperCamelCase : int , **__UpperCamelCase : int ): if self.framework == "pt": _UpperCAmelCase , _UpperCAmelCase = model_inputs["input_ids"].shape elif self.framework == "tf": _UpperCAmelCase , _UpperCAmelCase = tf.shape(model_inputs["input_ids"] ).numpy() _UpperCAmelCase = generate_kwargs.get("min_length" , self.model.config.min_length ) _UpperCAmelCase = generate_kwargs.get("max_length" , self.model.config.max_length ) self.check_inputs(__UpperCamelCase , generate_kwargs["min_length"] , generate_kwargs["max_length"] ) _UpperCAmelCase = self.model.generate(**__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = output_ids.shape[0] if self.framework == "pt": _UpperCAmelCase = output_ids.reshape(__UpperCamelCase , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": _UpperCAmelCase = tf.reshape(__UpperCamelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def UpperCAmelCase__ ( self : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any]=ReturnType.TEXT , __UpperCamelCase : Optional[Any]=False ): _UpperCAmelCase = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: _UpperCAmelCase = {F'''{self.return_name}_token_ids''': output_ids} elif return_type == ReturnType.TEXT: _UpperCAmelCase = { F'''{self.return_name}_text''': self.tokenizer.decode( __UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase , ) } records.append(__UpperCamelCase ) return records @add_end_docstrings(lowercase) class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : int = """summary""" def __call__( self : Optional[int] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Any ): return super().__call__(*__UpperCamelCase , **__UpperCamelCase ) def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ): if max_length < min_length: logger.warning(F'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' ) if input_length < max_length: logger.warning( F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is ''' "a summarization task, where outputs shorter than the input are typically wanted, you might " F'''consider decreasing max_length manually, e.g. 
summarizer(\'...\', max_length={input_length//2})''' ) @add_end_docstrings(lowercase) class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : Tuple = """translation""" def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ): if input_length > 0.9 * max_length: logger.warning( F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider ''' "increasing your max_length manually, e.g. translator('...', max_length=400)" ) return True def UpperCAmelCase__ ( self : str , *__UpperCamelCase : Any , __UpperCamelCase : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , __UpperCamelCase : int=None , __UpperCamelCase : Union[str, Any]=None ): if getattr(self.tokenizer , "_build_translation_inputs" , __UpperCamelCase ): return self.tokenizer._build_translation_inputs( *__UpperCamelCase , return_tensors=self.framework , truncation=__UpperCamelCase , src_lang=__UpperCamelCase , tgt_lang=__UpperCamelCase ) else: return super()._parse_and_tokenize(*__UpperCamelCase , truncation=__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : str=None , **__UpperCamelCase : List[Any] ): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = super()._sanitize_parameters(**__UpperCamelCase ) if src_lang is not None: _UpperCAmelCase = src_lang if tgt_lang is not None: _UpperCAmelCase = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. _UpperCAmelCase = kwargs.get("task" , self.task ) _UpperCAmelCase = task.split("_" ) if task and len(__UpperCamelCase ) == 4: # translation, XX, to YY _UpperCAmelCase = items[1] _UpperCAmelCase = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self : List[Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Optional[Any] ): return super().__call__(*__UpperCamelCase , **__UpperCamelCase )
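# Usage sketch for the pipelines defined above; pipeline() picks each task's default model,
# which is downloaded on first use.
from transformers import pipeline

summarizer = pipeline("summarization")  # SummarizationPipeline
print(summarizer("Long article text ...", max_length=40, min_length=5))

translator = pipeline("translation_en_to_fr")  # TranslationPipeline; src/tgt langs are parsed from the task name
print(translator("How old are you?"))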
def cramers_rule_2x2(equation1: list[float], equation2: list[float]) -> tuple[float, float]:
    # Check if the input is valid
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution: both lines pass through the origin (consistent system)
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-trivial solution (consistent system)
    return (x, y)
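# Worked example for cramers_rule_2x2() above: for 2x + 3y = 6 and x + 2y = 5 the
# determinants are D = 2*2 - 1*3 = 1, Dx = 6*2 - 5*3 = -3 and Dy = 2*5 - 1*6 = 4,
# giving (x, y) = (-3, 4).
x, y = cramers_rule_2x2([2, 3, 6], [1, 2, 5])
assert (x, y) == (-3.0, 4.0)
assert 2 * x + 3 * y == 6 and x + 2 * y == 5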
import torch

from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mean-pool the token embeddings, ignoring padding positions
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
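# Minimal inference sketch for the model above. The checkpoint name is illustrative: it
# assumes a hub repo whose weights match this MCLIPConfig/MultilingualCLIP architecture.
from transformers import AutoTokenizer

model_name = "M-CLIP/XLM-Roberta-Large-Vit-B-32"  # illustrative checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = MultilingualCLIP.from_pretrained(model_name)

batch = tokenizer(["a photo of a cat"], return_tensors="pt")
text_features, pooled = model(batch["input_ids"], batch["attention_mask"])
print(text_features.shape)  # (1, numDims)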
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
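# Hypothetical invocation, assuming the script is saved as
# convert_rembert_tf_checkpoint_to_pytorch.py; all paths are placeholders.
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin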
import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS} def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' ) if tokenizer_name is None: _UpperCAmelCase = TOKENIZER_CLASSES else: _UpperCAmelCase = {tokenizer_name: getattr(_lowerCAmelCase , tokenizer_name + "Fast" )} logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' ) for tokenizer_name in tokenizer_names: _UpperCAmelCase = TOKENIZER_CLASSES[tokenizer_name] _UpperCAmelCase = True if checkpoint_name is None: _UpperCAmelCase = list(tokenizer_class.max_model_input_sizes.keys() ) else: _UpperCAmelCase = [checkpoint_name] logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' ) for checkpoint in checkpoint_names: logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' ) # Load tokenizer _UpperCAmelCase = tokenizer_class.from_pretrained(_lowerCAmelCase , force_download=_lowerCAmelCase ) # Save fast tokenizer logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' ) # For organization names we create sub-directories if "/" in checkpoint: _UpperCAmelCase , _UpperCAmelCase = checkpoint.split("/" ) _UpperCAmelCase = os.path.join(_lowerCAmelCase , _lowerCAmelCase ) elif add_prefix: _UpperCAmelCase = checkpoint _UpperCAmelCase = dump_path else: _UpperCAmelCase = None _UpperCAmelCase = dump_path logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: _UpperCAmelCase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] _UpperCAmelCase = file_path.split(_lowerCAmelCase )[-1][0] if next_char == "/": _UpperCAmelCase = os.path.join(_lowerCAmelCase , _lowerCAmelCase ) _UpperCAmelCase = None logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' ) _UpperCAmelCase = tokenizer.save_pretrained( _lowerCAmelCase , legacy_format=_lowerCAmelCase , filename_prefix=_lowerCAmelCase ) logger.info(F'''=> File names {file_names}''' ) for file_name in file_names: if not file_name.endswith("tokenizer.json" ): os.remove(_lowerCAmelCase ) logger.info(F'''=> removing {file_name}''' ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files." ) parser.add_argument( "--tokenizer_name", default=None, type=str, help=( F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will ''' "download and convert all the checkpoints from AWS." ), ) parser.add_argument( "--checkpoint_name", default=None, type=str, help="Optional checkpoint name. 
If not given, will download and convert the canonical checkpoints from AWS.", ) parser.add_argument( "--force_download", action="store_true", help="Re-download checkpoints.", ) __lowerCAmelCase = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
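# Hypothetical invocation of the converter above, regenerating the fast tokenizer files for a
# single BERT checkpoint; the script filename and output path are placeholders.
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers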
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : @staticmethod def UpperCAmelCase__ ( *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ): pass @is_pipeline_test @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase): __SCREAMING_SNAKE_CASE : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ): _UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) _UpperCAmelCase = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ): _UpperCAmelCase = vqa_pipeline(__UpperCamelCase , top_k=1 ) self.assertEqual( __UpperCamelCase , [ [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}], [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}], ] , ) @require_torch def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) _UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png" _UpperCAmelCase = "How many cats are there?" _UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question="How many cats are there?" , top_k=2 ) self.assertEqual( __UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] ) _UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( __UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] ) @slow @require_torch def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" ) _UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png" _UpperCAmelCase = "How many cats are there?" _UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) _UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) _UpperCAmelCase = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , ) @require_tf @unittest.skip("Visual question answering not implemented in TF" ) def UpperCAmelCase__ ( self : Optional[int] ): pass
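# Outside the test harness, the pipeline exercised above is used the same way; the image path
# is a placeholder, and the expected scores mirror the slow test above.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(image="./cats.png", question="How many cats are there?", top_k=2)
print(preds)  # e.g. [{"score": 0.88, "answer": "2"}, {"score": 0.30, "answer": "1"}]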
from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 __lowerCAmelCase = { # 1536-bit 5: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", base=1_6, ), "generator": 2, }, # 2048-bit 1_4: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", base=1_6, ), "generator": 2, }, # 3072-bit 1_5: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", base=1_6, ), "generator": 2, }, # 4096-bit 1_6: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" + "FFFFFFFFFFFFFFFF", base=1_6, ), "generator": 2, }, # 6144-bit 1_7: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" + 
"8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8" + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C" + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718" + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D" + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D" + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226" + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC" + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26" + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB" + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2" + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127" + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406" + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918" + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151" + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03" + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F" + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B" + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632" + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E" + "6DCC4024FFFFFFFFFFFFFFFF", base=1_6, ), "generator": 2, }, # 8192-bit 1_8: { "prime": int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" + "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" + 
"22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF", base=1_6, ), "generator": 2, }, } class __SCREAMING_SNAKE_CASE : def __init__( self : Dict , __UpperCamelCase : int = 14 ): if group not in primes: raise ValueError("Unsupported Group" ) _UpperCAmelCase = primes[group]["prime"] _UpperCAmelCase = primes[group]["generator"] _UpperCAmelCase = int(hexlify(urandom(32 ) ) , base=16 ) def UpperCAmelCase__ ( self : Dict ): return hex(self.__private_key )[2:] def UpperCAmelCase__ ( self : int ): _UpperCAmelCase = pow(self.generator , self.__private_key , self.prime ) return hex(__UpperCamelCase )[2:] def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(__UpperCamelCase , (self.prime - 1) // 2 , self.prime ) == 1 ) def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : str ): _UpperCAmelCase = int(__UpperCamelCase , base=16 ) if not self.is_valid_public_key(__UpperCamelCase ): raise ValueError("Invalid public key" ) _UpperCAmelCase = pow(__UpperCamelCase , self.__private_key , self.prime ) return shaaaa(str(__UpperCamelCase ).encode() ).hexdigest() @staticmethod def UpperCAmelCase__ ( __UpperCamelCase : int , __UpperCamelCase : int ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(__UpperCamelCase , (prime - 1) // 2 , __UpperCamelCase ) == 1 ) @staticmethod def UpperCAmelCase__ ( __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : int = 14 ): _UpperCAmelCase = int(__UpperCamelCase , base=16 ) _UpperCAmelCase = int(__UpperCamelCase , base=16 ) _UpperCAmelCase = primes[group]["prime"] if not DiffieHellman.is_valid_public_key_static(__UpperCamelCase , __UpperCamelCase ): raise ValueError("Invalid public key" ) _UpperCAmelCase = pow(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return shaaaa(str(__UpperCamelCase ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
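# Usage sketch for one of the exported pipelines. The checkpoint name is the one commonly
# associated with Versatile Diffusion, but it is an assumption here, as is GPU availability.
import torch
from diffusers import VersatileDiffusionTextToImagePipeline

pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
image = pipe("a red cube on top of a blue sphere").images[0]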
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split


data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b) -> float:
    """Return the Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5) -> str:
    """Classify `point` by majority vote among its k nearest neighbours."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # The most commonly occurring class among them is the class
    # into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
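# The same majority-vote idea on a toy 1-D dataset, to make the sorted(distances)[:k] plus
# Counter mechanics concrete.
from collections import Counter

train = [(1.0, 0), (1.2, 0), (3.0, 1), (3.1, 1), (2.9, 1)]  # (feature, label) pairs
point, k = 2.8, 3

distances = sorted((abs(x - point), label) for x, label in train)
votes = [label for _, label in distances[:k]]
print(Counter(votes).most_common(1)[0][0])  # 1 -- the three nearest neighbours are all class 1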
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : str = (UniPCMultistepScheduler,) __SCREAMING_SNAKE_CASE : Dict = (("""num_inference_steps""", 25),) def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Any ): _UpperCAmelCase = { "num_train_timesteps": 1_000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "solver_type": "bh2", } config.update(**__UpperCamelCase ) return config def UpperCAmelCase__ ( self : int , __UpperCamelCase : Any=0 , **__UpperCamelCase : Any ): _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase = sample, sample for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any]=0 , **__UpperCamelCase : List[Any] ): _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not 
identical" def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Dict=None , **__UpperCamelCase : Optional[Any] ): if scheduler is None: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 10 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase ) for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ): scheduler.set_timesteps(__UpperCamelCase ) elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ): _UpperCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] _UpperCAmelCase = scheduler.timesteps[5] _UpperCAmelCase = scheduler.timesteps[6] _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCAmelCase__ ( self : Union[str, Any] ): # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2464 ) < 1e-3 _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2464 ) < 1e-3 def UpperCAmelCase__ ( self : str ): for timesteps in [25, 50, 100, 999, 1_000]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def UpperCAmelCase__ ( self : int ): self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def 
UpperCAmelCase__ ( self : int ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def UpperCAmelCase__ ( self : int ): for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , ) _UpperCAmelCase = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers" def UpperCAmelCase__ ( self : Optional[int] ): self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2464 ) < 1e-3 def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = self.full_loop(prediction_type="v_prediction" ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.1014 ) < 1e-3 def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 10 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample assert sample.dtype == torch.floataa def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Optional[Any] ): for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
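# Beyond the unit tests above, the scheduler is typically swapped into an existing diffusers
# pipeline from the old scheduler's config; a sketch, with an illustrative checkpoint name.
import torch
from diffusers import DiffusionPipeline, UniPCMultistepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")
image = pipe("an astronaut riding a horse", num_inference_steps=20).images[0]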
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : List[str] = BertTokenizer __SCREAMING_SNAKE_CASE : int = BertTokenizerFast __SCREAMING_SNAKE_CASE : Optional[int] = True __SCREAMING_SNAKE_CASE : Any = True __SCREAMING_SNAKE_CASE : List[str] = filter_non_english def UpperCAmelCase__ ( self : List[Any] ): super().setUp() _UpperCAmelCase = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : Optional[Any] ): _UpperCAmelCase = "UNwant\u00E9d,running" _UpperCAmelCase = "unwanted, running" return input_text, output_text def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = self.tokenizer_class(self.vocab_file ) _UpperCAmelCase = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(__UpperCamelCase , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [9, 6, 7, 12, 10, 11] ) def UpperCAmelCase__ ( self : List[Any] ): if not self.test_rust_tokenizer: return _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = "UNwant\u00E9d,running" _UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase ) _UpperCAmelCase = rust_tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) _UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(__UpperCamelCase ) _UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) # With lower casing _UpperCAmelCase = self.get_tokenizer(do_lower_case=__UpperCamelCase ) _UpperCAmelCase = self.get_rust_tokenizer(do_lower_case=__UpperCamelCase ) _UpperCAmelCase = "UNwant\u00E9d,running" _UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase ) _UpperCAmelCase = rust_tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) _UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = self.get_rust_tokenizer() _UpperCAmelCase = tokenizer.encode(__UpperCamelCase ) _UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def 
UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = BasicTokenizer() _UpperCAmelCase = "a\n'll !!to?'d of, can't." 
_UpperCAmelCase = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."] self.assertListEqual(tokenizer.tokenize(__UpperCamelCase ) , __UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] _UpperCAmelCase = {} for i, token in enumerate(__UpperCamelCase ): _UpperCAmelCase = i _UpperCAmelCase = WordpieceTokenizer(vocab=__UpperCamelCase , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def UpperCAmelCase__ ( self : Tuple ): self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def UpperCAmelCase__ ( self : List[str] ): self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def UpperCAmelCase__ ( self : List[Any] ): self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def UpperCAmelCase__ ( self : int ): _UpperCAmelCase = self.get_tokenizer() _UpperCAmelCase = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(__UpperCamelCase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(__UpperCamelCase ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = self.tokenizer_class.from_pretrained("bert-base-uncased" ) _UpperCAmelCase = tokenizer.encode("sequence builders" , add_special_tokens=__UpperCamelCase ) _UpperCAmelCase = tokenizer.encode("multi-sequence build" , add_special_tokens=__UpperCamelCase ) _UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase ) _UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase , __UpperCamelCase ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def UpperCAmelCase__ ( self : Any ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' _UpperCAmelCase = tokenizer_r.encode_plus( __UpperCamelCase , return_attention_mask=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase , ) _UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(__UpperCamelCase , "do_lower_case" ) else False _UpperCAmelCase = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), 
"##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase = ["的", "人", "有"] _UpperCAmelCase = "".join(__UpperCamelCase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCAmelCase = True _UpperCAmelCase = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = tokenizer_p.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) _UpperCAmelCase = tokenizer_r.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) _UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(__UpperCamelCase ) _UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(__UpperCamelCase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = False _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = tokenizer_r.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) _UpperCAmelCase = tokenizer_p.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) _UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(__UpperCamelCase ) _UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(__UpperCamelCase ) # it is expected that only the first Chinese character is not preceded by "##". _UpperCAmelCase = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__UpperCamelCase ) ] self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
684
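The tokenizer-test row that just closed pins down WordPiece's greedy longest-match-first behavior ("unwanted" -> un/##want/##ed; a word with no full match -> [UNK]). As a readability aid, here is a minimal standalone sketch of that matching loop; the function name and vocabulary are my own illustration, reconstructed from the asserted outputs rather than taken from the dataset row:

def wordpiece(word: str, vocab: set, unk: str = "[UNK]") -> list:
    """Greedy longest-match-first subword split, as the tests above pin down."""
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:                  # shrink the candidate until a vocab hit
            sub = word[start:end]
            if start > 0:
                sub = "##" + sub            # continuation pieces carry the '##' prefix
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:
            return [unk]                    # any unmatchable span sinks the whole word
        pieces.append(cur)
        start = end
    return pieces

vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(wordpiece("unwanted", vocab))   # ['un', '##want', '##ed']
print(wordpiece("unwantedX", vocab))  # ['[UNK]']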
import math class __SCREAMING_SNAKE_CASE : def __init__( self : Union[str, Any] , __UpperCamelCase : List[Any]=0 ): # a graph with Node 0,1,...,N-1 _UpperCAmelCase = n _UpperCAmelCase = [ [math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase ) ] # adjacency matrix for weight _UpperCAmelCase = [ [math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase ) ] # dp[i][j] stores minimum distance from i to j def UpperCAmelCase__ ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ): _UpperCAmelCase = w def UpperCAmelCase__ ( self : Dict ): for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): _UpperCAmelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any ): return self.dp[u][v] if __name__ == "__main__": __lowerCAmelCase = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 1_0) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 1_0) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
684
1
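The Floyd-Warshall snippet above hides the recurrence behind renamed variables; below is a de-obfuscated sketch of the same all-pairs shortest-path update (names are mine, and the diagonal is set to 0 up front rather than left at infinity), followed by the two queries the row's __main__ block issues:

import math

def floyd_warshall(weights):
    """All-pairs shortest paths; weights[i][j] is the i->j edge weight (math.inf if absent)."""
    n = len(weights)
    dist = [row[:] for row in weights]          # work on a copy
    for k in range(n):                          # allow node k as an intermediate hop
        for i in range(n):
            for j in range(n):
                dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
    return dist

INF = math.inf
w = [  # same edges as the row's __main__ block
    [0, INF, 9, INF, 10],
    [INF, 0, INF, 5, INF],
    [INF, INF, 0, 7, INF],
    [10, 2, 1, 0, 6],
    [INF, 3, 4, 9, 0],
]
d = floyd_warshall(w)
print(d[1][4])  # 11: 1 -> 3 -> 4  (5 + 6)
print(d[0][3])  # 16: 0 -> 2 -> 3  (9 + 7)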
from random import randint from tempfile import TemporaryFile import numpy as np def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: _UpperCAmelCase = 0 if start < end: _UpperCAmelCase = randint(_lowerCAmelCase , _lowerCAmelCase ) _UpperCAmelCase = a[end] _UpperCAmelCase = a[pivot] _UpperCAmelCase = temp _UpperCAmelCase , _UpperCAmelCase = _in_place_partition(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) count += _in_place_quick_sort(_lowerCAmelCase , _lowerCAmelCase , p - 1 ) count += _in_place_quick_sort(_lowerCAmelCase , p + 1 , _lowerCAmelCase ) return count def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: _UpperCAmelCase = 0 _UpperCAmelCase = randint(_lowerCAmelCase , _lowerCAmelCase ) _UpperCAmelCase = a[end] _UpperCAmelCase = a[pivot] _UpperCAmelCase = temp _UpperCAmelCase = start - 1 for index in range(_lowerCAmelCase , _lowerCAmelCase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value _UpperCAmelCase = new_pivot_index + 1 _UpperCAmelCase = a[new_pivot_index] _UpperCAmelCase = a[index] _UpperCAmelCase = temp _UpperCAmelCase = a[new_pivot_index + 1] _UpperCAmelCase = a[end] _UpperCAmelCase = temp return new_pivot_index + 1, count __lowerCAmelCase = TemporaryFile() __lowerCAmelCase = 1_0_0 # 100 elements are to be sorted __lowerCAmelCase , __lowerCAmelCase = 0, 1 # mean and standard deviation __lowerCAmelCase = np.random.normal(mu, sigma, p) np.save(outfile, X) print("The array is") print(X) outfile.seek(0) # using the same array __lowerCAmelCase = np.load(outfile) __lowerCAmelCase = len(M) - 1 __lowerCAmelCase = _in_place_quick_sort(M, 0, r) print( "No of Comparisons for 100 elements selected from a standard normal distribution" "is :" ) print(z)
684
import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : Dict = VQModel __SCREAMING_SNAKE_CASE : Optional[int] = """sample""" @property def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[int]=(32, 32) ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase ) return {"sample": image} @property def UpperCAmelCase__ ( self : Tuple ): return (3, 32, 32) @property def UpperCAmelCase__ ( self : str ): return (3, 32, 32) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase__ ( self : Dict ): pass def UpperCAmelCase__ ( self : str ): pass def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(__UpperCamelCase ) _UpperCAmelCase = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" ) model.to(__UpperCamelCase ).eval() torch.manual_seed(0 ) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0 ) _UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size ) _UpperCAmelCase = image.to(__UpperCamelCase ) with torch.no_grad(): _UpperCAmelCase = model(__UpperCamelCase ).sample _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] ) # fmt: on self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
684
1
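The quick-sort snippet that opens this row sorts in place with a random pivot while counting comparisons. A compact sketch of the same Lomuto-style partition and recursion, with self-chosen names rather than the row's obfuscated ones:

import random

def quicksort_count(a: list, lo: int, hi: int) -> int:
    """Sort a[lo..hi] in place; return the number of element comparisons."""
    if lo >= hi:
        return 0
    pivot = random.randint(lo, hi)
    a[pivot], a[hi] = a[hi], a[pivot]     # move the pivot to the end
    count, p = 0, lo - 1                  # p: last index of the "< pivot" region
    for i in range(lo, hi):
        count += 1
        if a[i] < a[hi]:
            p += 1
            a[p], a[i] = a[i], a[p]
    a[p + 1], a[hi] = a[hi], a[p + 1]     # put the pivot in its final slot
    count += quicksort_count(a, lo, p)
    count += quicksort_count(a, p + 2, hi)
    return count

data = [5, 3, 8, 1, 9, 2]
comparisons = quicksort_count(data, 0, len(data) - 1)
print(data, comparisons)  # sorted list plus the comparison count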
import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem __lowerCAmelCase = importlib.util.find_spec("s3fs") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 __lowerCAmelCase = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def __lowerCamelCase ( _lowerCAmelCase ) -> str: if "://" in dataset_path: _UpperCAmelCase = dataset_path.split("://" )[1] return dataset_path def __lowerCamelCase ( _lowerCAmelCase ) -> bool: if fs is not None and fs.protocol != "file": return True else: return False def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _UpperCAmelCase = not is_remote_filesystem(_lowerCAmelCase ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(_lowerCAmelCase ) , fs._strip_protocol(_lowerCAmelCase ) ) else: fs.mv(_lowerCAmelCase , _lowerCAmelCase , recursive=_lowerCAmelCase ) def __lowerCamelCase ( ) -> None: if hasattr(fsspec.asyn , "reset_lock" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = threading.Lock()
684
import requests __lowerCAmelCase = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey=" def __lowerCamelCase ( _lowerCAmelCase ) -> None: # fetching a list of articles in json format _UpperCAmelCase = requests.get(_NEWS_API + bbc_news_api_key ).json() # each article in the list is a dict for i, article in enumerate(bbc_news_page["articles"] , 1 ): print(F'''{i}.) {article["title"]}''' ) if __name__ == "__main__": fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
684
1
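The filesystem helpers in this row reduce to two small checks: strip a "protocol://" prefix from a dataset path, and treat any non-"file" protocol as remote. A standalone sketch of that logic, with no fsspec dependency assumed:

from typing import Optional

def extract_path(dataset_path: str) -> str:
    """Drop a leading 'protocol://' prefix, as the row's helper does."""
    if "://" in dataset_path:
        return dataset_path.split("://")[1]
    return dataset_path

def is_remote(protocol: Optional[str]) -> bool:
    """Anything other than the local 'file' protocol counts as remote."""
    return protocol is not None and protocol != "file"

print(extract_path("s3://bucket/data"))    # bucket/data
print(is_remote("s3"), is_remote("file"))  # True False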
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionInstructPixaPixPipeline __SCREAMING_SNAKE_CASE : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""} __SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __SCREAMING_SNAKE_CASE : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS __SCREAMING_SNAKE_CASE : int = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase__ ( self : List[Any] ): torch.manual_seed(0 ) _UpperCAmelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , ) _UpperCAmelCase = PNDMScheduler(skip_prk_steps=__UpperCamelCase ) torch.manual_seed(0 ) _UpperCAmelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) _UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) _UpperCAmelCase = CLIPTextModel(__UpperCamelCase ) _UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _UpperCAmelCase = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def UpperCAmelCase__ ( self : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any]=0 ): _UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) _UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] _UpperCAmelCase = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert("RGB" ) if str(__UpperCamelCase ).startswith("mps" ): _UpperCAmelCase = torch.manual_seed(__UpperCamelCase ) else: _UpperCAmelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) _UpperCAmelCase = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "image_guidance_scale": 1, "output_type": "numpy", } return inputs def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase = 
self.get_dummy_components() _UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) _UpperCAmelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCAmelCase = self.get_dummy_inputs(__UpperCamelCase ) _UpperCAmelCase = sd_pipe(**__UpperCamelCase ).images _UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _UpperCAmelCase = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) _UpperCAmelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCAmelCase = self.get_dummy_inputs(__UpperCamelCase ) _UpperCAmelCase = "french fries" _UpperCAmelCase = sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase ) _UpperCAmelCase = output.images _UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _UpperCAmelCase = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) _UpperCAmelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCAmelCase = self.get_dummy_inputs(__UpperCamelCase ) _UpperCAmelCase = [inputs["prompt"]] * 2 _UpperCAmelCase = np.array(inputs["image"] ).astype(np.floataa ) / 255.0 _UpperCAmelCase = torch.from_numpy(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase ) _UpperCAmelCase = image / 2 + 0.5 _UpperCAmelCase = image.permute(0 , 3 , 1 , 2 ) _UpperCAmelCase = image.repeat(2 , 1 , 1 , 1 ) _UpperCAmelCase = sd_pipe(**__UpperCamelCase ).images _UpperCAmelCase = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) _UpperCAmelCase = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = EulerAncestralDiscreteScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" ) _UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) _UpperCAmelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCAmelCase = self.get_dummy_inputs(__UpperCamelCase ) _UpperCAmelCase = sd_pipe(**__UpperCamelCase ).images _UpperCAmelCase = image[0, -3:, -3:, -1] _UpperCAmelCase = [round(__UpperCamelCase , 4 ) for x in image_slice.flatten().tolist()] print(",".join([str(__UpperCamelCase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) _UpperCAmelCase = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCAmelCase__ ( self : str ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def UpperCAmelCase__ ( 
self : Union[str, Any] ): _UpperCAmelCase = self.get_dummy_components() _UpperCAmelCase = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) _UpperCAmelCase = VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase ) _UpperCAmelCase = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCAmelCase = pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type="pt" ) )[0] _UpperCAmelCase = components["vae"] _UpperCAmelCase = self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type="pt" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): _UpperCAmelCase = vae.encode(inputs[image_param] ).latent_dist.mode() _UpperCAmelCase = pipe(**__UpperCamelCase )[0] _UpperCAmelCase = np.abs(out - out_latents_inputs ).max() self.assertLess(__UpperCamelCase , 1e-4 , "passing latents as image input generate different result from passing image" ) @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCAmelCase__ ( self : Any ): super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : Union[str, Any]=0 ): _UpperCAmelCase = torch.manual_seed(__UpperCamelCase ) _UpperCAmelCase = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" ) _UpperCAmelCase = { "prompt": "turn him into a cyborg", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "image_guidance_scale": 1.0, "output_type": "numpy", } return inputs def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__UpperCamelCase ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() _UpperCAmelCase = self.get_inputs() _UpperCAmelCase = pipe(**__UpperCamelCase ).images _UpperCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) _UpperCAmelCase = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__UpperCamelCase ) _UpperCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() _UpperCAmelCase = self.get_inputs() _UpperCAmelCase = pipe(**__UpperCamelCase ).images _UpperCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) _UpperCAmelCase = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__UpperCamelCase ) _UpperCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() _UpperCAmelCase = self.get_inputs() _UpperCAmelCase = pipe(**__UpperCamelCase ).images _UpperCAmelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) _UpperCAmelCase = np.array([0.3828, 0.3834, 
0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = 0 def callback_fn(__UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : torch.FloatTensor ) -> None: _UpperCAmelCase = True nonlocal number_of_steps number_of_steps += 1 if step == 1: _UpperCAmelCase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) _UpperCAmelCase = latents[0, -3:, -3:, -1] _UpperCAmelCase = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: _UpperCAmelCase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) _UpperCAmelCase = latents[0, -3:, -3:, -1] _UpperCAmelCase = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 _UpperCAmelCase = False _UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa ) _UpperCAmelCase = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() _UpperCAmelCase = self.get_inputs() pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def UpperCAmelCase__ ( self : Optional[int] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa ) _UpperCAmelCase = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _UpperCAmelCase = self.get_inputs() _UpperCAmelCase = pipe(**__UpperCamelCase ) _UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 _UpperCAmelCase = inputs["image"].resize((504, 504) ) _UpperCAmelCase = "timbrooks/instruct-pix2pix" _UpperCAmelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( __UpperCamelCase , safety_checker=__UpperCamelCase , ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() _UpperCAmelCase = pipe(**__UpperCamelCase ) _UpperCAmelCase = output.images[0] _UpperCAmelCase = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) _UpperCAmelCase = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
684
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase = 10 def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = [1, 2, 3, 4] _UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase ) def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase ) def UpperCAmelCase__ ( self : int ): _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this." _UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase ) self.assertEqual(__UpperCamelCase , [] ) def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = "" _UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase ) self.assertEqual(__UpperCamelCase , [] ) self.assertEqual(__UpperCamelCase , [] ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = ( "It was the year of Our Lord one thousand seven hundred and " "seventy-five\n\nSpiritual revelations were conceded to England " "at that favoured period, as at this.\n@highlight\n\nIt was the best of times" ) _UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase ) _UpperCAmelCase = [ "It was the year of Our Lord one thousand seven hundred and seventy-five.", "Spiritual revelations were conceded to England at that favoured period, as at this.", ] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = ["It was the best of times."] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = torch.tensor([1, 2, 3, 4] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(__UpperCamelCase , 0 ).numpy() , expected.numpy() ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCamelCase , 23 ).numpy() , expected.numpy() ) def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCamelCase , 1 ).numpy() , expected.numpy() ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = 101 _UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) _UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) _UpperCAmelCase = compute_token_type_ids(__UpperCamelCase , __UpperCamelCase ) np.testing.assert_array_equal(__UpperCamelCase , __UpperCamelCase )
684
1
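The summarization tests in this row exercise a truncate-or-pad helper; here is a sketch of the behavior those asserts pin down (the function name is borrowed from the test, the body reconstructed from the expected outputs):

def truncate_or_pad(sequence: list, block_size: int, pad_token_id: int) -> list:
    """Return exactly block_size items: cut long inputs, right-pad short ones."""
    if len(sequence) >= block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

assert truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad(list(range(1, 14)), 10, 0) == list(range(1, 11))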
import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { "artists_file": "artists.json", "lyrics_file": "lyrics.json", "genres_file": "genres.json", } __lowerCAmelCase = { "artists_file": { "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json", }, "genres_file": { "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json", }, "lyrics_file": { "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json", }, } __lowerCAmelCase = { "jukebox": 5_1_2, } class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : int = PRETRAINED_LYRIC_TOKENS_SIZES __SCREAMING_SNAKE_CASE : Optional[int] = ["""input_ids""", """attention_mask"""] def __init__( self : int , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[str]=["v3", "v2", "v2"] , __UpperCamelCase : str=512 , __UpperCamelCase : Union[str, Any]=5 , __UpperCamelCase : List[Any]="<|endoftext|>" , **__UpperCamelCase : Union[str, Any] , ): _UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token super().__init__( unk_token=__UpperCamelCase , n_genres=__UpperCamelCase , version=__UpperCamelCase , max_n_lyric_tokens=__UpperCamelCase , **__UpperCamelCase , ) _UpperCAmelCase = version _UpperCAmelCase = max_n_lyric_tokens _UpperCAmelCase = n_genres with open(__UpperCamelCase , encoding="utf-8" ) as vocab_handle: _UpperCAmelCase = json.load(__UpperCamelCase ) with open(__UpperCamelCase , encoding="utf-8" ) as vocab_handle: _UpperCAmelCase = json.load(__UpperCamelCase ) with open(__UpperCamelCase , encoding="utf-8" ) as vocab_handle: _UpperCAmelCase = json.load(__UpperCamelCase ) _UpperCAmelCase = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
if len(self.lyrics_encoder ) == 79: _UpperCAmelCase = oov.replace(r"\-'" , r"\-+'" ) _UpperCAmelCase = regex.compile(__UpperCamelCase ) _UpperCAmelCase = {v: k for k, v in self.artists_encoder.items()} _UpperCAmelCase = {v: k for k, v in self.genres_encoder.items()} _UpperCAmelCase = {v: k for k, v in self.lyrics_encoder.items()} @property def UpperCAmelCase__ ( self : Dict ): return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def UpperCAmelCase__ ( self : Dict ): return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder ) def UpperCAmelCase__ ( self : int , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict ): _UpperCAmelCase = [self.artists_encoder.get(__UpperCamelCase , 0 ) for artist in list_artists] for genres in range(len(__UpperCamelCase ) ): _UpperCAmelCase = [self.genres_encoder.get(__UpperCamelCase , 0 ) for genre in list_genres[genres]] _UpperCAmelCase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) _UpperCAmelCase = [[self.lyrics_encoder.get(__UpperCamelCase , 0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def UpperCAmelCase__ ( self : int , __UpperCamelCase : List[str] ): return list(__UpperCamelCase ) def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , **__UpperCamelCase : Dict ): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_for_tokenization(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = self._tokenize(__UpperCamelCase ) return artist, genre, lyrics def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : bool = False ): for idx in range(len(self.version ) ): if self.version[idx] == "v3": _UpperCAmelCase = artists[idx].lower() _UpperCAmelCase = [genres[idx].lower()] else: _UpperCAmelCase = self._normalize(artists[idx] ) + ".v2" _UpperCAmelCase = [ self._normalize(__UpperCamelCase ) + ".v2" for genre in genres[idx].split("_" ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": _UpperCAmelCase = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+" ) _UpperCAmelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n" _UpperCAmelCase = {vocab[index]: index + 1 for index in range(len(__UpperCamelCase ) )} _UpperCAmelCase = 0 _UpperCAmelCase = len(__UpperCamelCase ) + 1 _UpperCAmelCase = self.vocab _UpperCAmelCase = {v: k for k, v in self.vocab.items()} _UpperCAmelCase = "" else: _UpperCAmelCase = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+" ) _UpperCAmelCase = self._run_strip_accents(__UpperCamelCase ) _UpperCAmelCase = lyrics.replace("\\" , "\n" ) _UpperCAmelCase = self.out_of_vocab.sub("" , __UpperCamelCase ), [], [] return artists, genres, lyrics def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Dict ): _UpperCAmelCase = unicodedata.normalize("NFD" , __UpperCamelCase ) _UpperCAmelCase = [] for char in text: _UpperCAmelCase = unicodedata.category(__UpperCamelCase ) if cat == "Mn": continue output.append(__UpperCamelCase ) return "".join(__UpperCamelCase ) def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : str ): _UpperCAmelCase = ( [chr(__UpperCamelCase ) for i in range(ord("a" ) , ord("z" ) + 1 )] + [chr(__UpperCamelCase ) for i in range(ord("A" ) , ord("Z" ) + 1 )] + [chr(__UpperCamelCase ) for i in range(ord("0" ) , 
ord("9" ) + 1 )] + ["."] ) _UpperCAmelCase = frozenset(__UpperCamelCase ) _UpperCAmelCase = re.compile(r"_+" ) _UpperCAmelCase = "".join([c if c in accepted else "_" for c in text.lower()] ) _UpperCAmelCase = pattern.sub("_" , __UpperCamelCase ).strip("_" ) return text def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : List[str] ): return " ".join(__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : bool = False ): # Convert to TensorType if not isinstance(__UpperCamelCase , __UpperCamelCase ): _UpperCAmelCase = TensorType(__UpperCamelCase ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed." ) import tensorflow as tf _UpperCAmelCase = tf.constant _UpperCAmelCase = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed." ) import torch _UpperCAmelCase = torch.tensor _UpperCAmelCase = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed." ) import jax.numpy as jnp # noqa: F811 _UpperCAmelCase = jnp.array _UpperCAmelCase = _is_jax else: _UpperCAmelCase = np.asarray _UpperCAmelCase = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: _UpperCAmelCase = [inputs] if not is_tensor(__UpperCamelCase ): _UpperCAmelCase = as_tensor(__UpperCamelCase ) except: # noqa E722 raise ValueError( "Unable to create tensor, you should probably activate truncation and/or padding " "with 'padding=True' 'truncation=True' to have batched tensors with the same length." 
) return inputs def __call__( self : Dict , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : List[Any]="" , __UpperCamelCase : int="pt" ): _UpperCAmelCase = [0, 0, 0] _UpperCAmelCase = [artist] * len(self.version ) _UpperCAmelCase = [genres] * len(self.version ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.tokenize(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self._convert_token_to_id(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = [-INFINITY] * len(full_tokens[-1] ) _UpperCAmelCase = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=__UpperCamelCase ) for i in range(len(self.version ) ) ] return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks} ) def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ): if not os.path.isdir(__UpperCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _UpperCAmelCase = os.path.join( __UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"] ) with open(__UpperCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.artists_encoder , ensure_ascii=__UpperCamelCase ) ) _UpperCAmelCase = os.path.join( __UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"] ) with open(__UpperCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.genres_encoder , ensure_ascii=__UpperCamelCase ) ) _UpperCAmelCase = os.path.join( __UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"] ) with open(__UpperCamelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.lyrics_encoder , ensure_ascii=__UpperCamelCase ) ) return (artists_file, genres_file, lyrics_file) def UpperCAmelCase__ ( self : str , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple ): _UpperCAmelCase = self.artists_decoder.get(__UpperCamelCase ) _UpperCAmelCase = [self.genres_decoder.get(__UpperCamelCase ) for genre in genres_index] _UpperCAmelCase = [self.lyrics_decoder.get(__UpperCamelCase ) for character in lyric_index] return artist, genres, lyrics
684
from __future__ import annotations from collections import namedtuple def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> tuple: _UpperCAmelCase = namedtuple("result" , "name value" ) if (voltage, current, power).count(0 ) != 1: raise ValueError("Only one argument must be 0" ) elif power < 0: raise ValueError( "Power cannot be negative in any electrical/electronics system" ) elif voltage == 0: return result("voltage" , power / current ) elif current == 0: return result("current" , power / voltage ) elif power == 0: return result("power" , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
684
1
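The Jukebox tokenizer at the start of this row normalizes text by decomposing it and dropping combining marks; a standalone sketch of that NFD accent-stripping step:

import unicodedata

def strip_accents(text: str) -> str:
    """Decompose to NFD and drop combining marks (Unicode category 'Mn')."""
    decomposed = unicodedata.normalize("NFD", text)
    return "".join(ch for ch in decomposed if unicodedata.category(ch) != "Mn")

print(strip_accents("Beyoncé naïve"))  # Beyonce naive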
from __future__ import annotations class __SCREAMING_SNAKE_CASE : def __init__( self : str , __UpperCamelCase : int ): _UpperCAmelCase = order # a_{0} ... a_{k} _UpperCAmelCase = [1.0] + [0.0] * order # b_{0} ... b_{k} _UpperCAmelCase = [1.0] + [0.0] * order # x[n-1] ... x[n-k] _UpperCAmelCase = [0.0] * self.order # y[n-1] ... y[n-k] _UpperCAmelCase = [0.0] * self.order def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : list[float] , __UpperCamelCase : list[float] ): if len(__UpperCamelCase ) < self.order: _UpperCAmelCase = [1.0, *a_coeffs] if len(__UpperCamelCase ) != self.order + 1: _UpperCAmelCase = ( F'''Expected a_coeffs to have {self.order + 1} elements ''' F'''for {self.order}-order filter, got {len(__UpperCamelCase )}''' ) raise ValueError(__UpperCamelCase ) if len(__UpperCamelCase ) != self.order + 1: _UpperCAmelCase = ( F'''Expected b_coeffs to have {self.order + 1} elements ''' F'''for {self.order}-order filter, got {len(__UpperCamelCase )}''' ) raise ValueError(__UpperCamelCase ) _UpperCAmelCase = a_coeffs _UpperCAmelCase = b_coeffs def UpperCAmelCase__ ( self : int , __UpperCamelCase : float ): _UpperCAmelCase = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) _UpperCAmelCase = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] _UpperCAmelCase = self.input_history[:-1] _UpperCAmelCase = self.output_history[:-1] _UpperCAmelCase = sample _UpperCAmelCase = result return result
684
import argparse import math import traceback import dateutil.parser as date_parser import requests def __lowerCamelCase ( _lowerCAmelCase ) -> Any: _UpperCAmelCase = {} _UpperCAmelCase = job["started_at"] _UpperCAmelCase = job["completed_at"] _UpperCAmelCase = date_parser.parse(_lowerCAmelCase ) _UpperCAmelCase = date_parser.parse(_lowerCAmelCase ) _UpperCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 ) _UpperCAmelCase = start _UpperCAmelCase = end _UpperCAmelCase = duration_in_min return job_info def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=None ) -> str: _UpperCAmelCase = None if token is not None: _UpperCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} _UpperCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' _UpperCAmelCase = requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).json() _UpperCAmelCase = {} try: job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} ) _UpperCAmelCase = math.ceil((result["total_count"] - 100) / 100 ) for i in range(_lowerCAmelCase ): _UpperCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=_lowerCAmelCase ).json() job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} ) return job_time except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = get_job_time(args.workflow_run_id) __lowerCAmelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F'''{k}: {v["duration"]}''')
684
1
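The IIR filter that opens this row implements the standard difference equation y[n] = (sum_i b_i * x[n-i] - sum_{i>=1} a_i * y[n-i]) / a_0. A sketch of one filter step used as a first-order smoother; the coefficients below are illustrative, not taken from the row:

def iir_step(sample, a, b, xs, ys):
    """One step of y[n] = (b.x - a.y) / a0; xs, ys hold the most recent history."""
    acc = b[0] * sample
    for i in range(1, len(a)):
        acc += b[i] * xs[i - 1] - a[i] * ys[i - 1]
    y = acc / a[0]
    xs.insert(0, sample); xs.pop()   # shift the input history
    ys.insert(0, y); ys.pop()        # shift the output history
    return y

# First-order smoothing filter: y[n] = 0.5*x[n] + 0.5*y[n-1]
a, b = [1.0, -0.5], [0.5, 0.0]
xs, ys = [0.0], [0.0]
for x in [1.0, 1.0, 1.0, 1.0]:
    print(round(iir_step(x, a, b, xs, ys), 4))  # 0.5, 0.75, 0.875, 0.9375 -> approaches 1.0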
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { "caidas/swin2sr-classicalsr-x2-64": ( "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json" ), } class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : List[str] = """swin2sr""" __SCREAMING_SNAKE_CASE : Optional[int] = { """hidden_size""": """embed_dim""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : List[str] , __UpperCamelCase : str=64 , __UpperCamelCase : Dict=1 , __UpperCamelCase : int=3 , __UpperCamelCase : int=180 , __UpperCamelCase : Optional[Any]=[6, 6, 6, 6, 6, 6] , __UpperCamelCase : str=[6, 6, 6, 6, 6, 6] , __UpperCamelCase : List[Any]=8 , __UpperCamelCase : Union[str, Any]=2.0 , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : Tuple=0.0 , __UpperCamelCase : str=0.1 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : Any=False , __UpperCamelCase : Tuple=0.02 , __UpperCamelCase : Optional[Any]=1e-5 , __UpperCamelCase : Dict=2 , __UpperCamelCase : str=1.0 , __UpperCamelCase : List[str]="1conv" , __UpperCamelCase : Union[str, Any]="pixelshuffle" , **__UpperCamelCase : List[Any] , ): super().__init__(**__UpperCamelCase ) _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = len(__UpperCamelCase ) _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = upscale _UpperCAmelCase = img_range _UpperCAmelCase = resi_connection _UpperCAmelCase = upsampler
684
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel __lowerCAmelCase = { "gwf-440k": { "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 6_5_5_3_6, }, "jmann-small-190k": { "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 6_5_5_3_6, }, "jmann-large-580k": { "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 1_3_1_0_7_2, }, "maestro-uncond-150k": { "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, "unlocked-uncond-250k": { "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, "honk-140k": { "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, } def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: return torch.atana(_lowerCAmelCase , _lowerCAmelCase ) / math.pi * 2 def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]: _UpperCAmelCase = torch.sin(t * math.pi / 2 ) ** 2 _UpperCAmelCase = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(_lowerCAmelCase , _lowerCAmelCase ) class __SCREAMING_SNAKE_CASE ( lowercase): pass class __SCREAMING_SNAKE_CASE ( nn.Module): def __init__( self : str , __UpperCamelCase : Optional[int] ): super().__init__() _UpperCAmelCase = DiffusionAttnUnetaD(__UpperCamelCase , n_attn_layers=4 ) _UpperCAmelCase = deepcopy(self.diffusion ) _UpperCAmelCase = torch.quasirandom.SobolEngine(1 , scramble=__UpperCamelCase ) def __lowerCamelCase ( _lowerCAmelCase ) -> int: _UpperCAmelCase = MODELS_MAP[model_name]["url"] os.system(F'''wget {url} ./''' ) return F'''./{model_name}.ckpt''' __lowerCAmelCase = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", } __lowerCAmelCase = { "8": "resnets.0", "9": "attentions.0", "10": "resnets.1", "11": "attentions.1", "12": "resnets.2", "13": "attentions.2", } __lowerCAmelCase = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", "8": "resnets.3", "9": "attentions.3", "10": "resnets.4", "11": "attentions.4", "12": "resnets.5", "13": "attentions.5", } __lowerCAmelCase = { "0": "resnets.0", "1": "resnets.1", "2": "resnets.2", "4": "resnets.0", "5": "resnets.1", "6": "resnets.2", } __lowerCAmelCase = { "skip": "conv_skip", "main.0": "conv_1", "main.1": "group_norm_1", "main.3": "conv_2", "main.4": "group_norm_2", } __lowerCAmelCase = { "norm": "group_norm", "qkv_proj": ["query", "key", "value"], "out_proj": ["proj_attn"], } def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]: if name.startswith("skip" ): return name.replace("skip" , RES_CONV_MAP["skip"] ) # name has to be of format main.{digit} if not name.startswith("main." 
): raise ValueError(F'''ResConvBlock error with {name}''' ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[Any]: for key, value in ATTN_MAP.items(): if name.startswith(_lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ): return name.replace(_lowerCAmelCase , _lowerCAmelCase ) elif name.startswith(_lowerCAmelCase ): return [name.replace(_lowerCAmelCase , _lowerCAmelCase ) for v in value] raise ValueError(F'''Attn error with {name}''' ) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=13 ) -> List[Any]: _UpperCAmelCase = input_string if string.split("." )[0] == "timestep_embed": return string.replace("timestep_embed" , "time_proj" ) _UpperCAmelCase = 0 if string.startswith("net.3." ): depth += 1 _UpperCAmelCase = string[6:] elif string.startswith("net." ): _UpperCAmelCase = string[4:] while string.startswith("main.7." ): depth += 1 _UpperCAmelCase = string[7:] if string.startswith("main." ): _UpperCAmelCase = string[5:] # mid block if string[:2].isdigit(): _UpperCAmelCase = string[:2] _UpperCAmelCase = string[2:] else: _UpperCAmelCase = string[0] _UpperCAmelCase = string[1:] if depth == max_depth: _UpperCAmelCase = MID_NUM_TO_LAYER[layer_num] _UpperCAmelCase = "mid_block" elif depth > 0 and int(_lowerCAmelCase ) < 7: _UpperCAmelCase = DOWN_NUM_TO_LAYER[layer_num] _UpperCAmelCase = F'''down_blocks.{depth}''' elif depth > 0 and int(_lowerCAmelCase ) > 7: _UpperCAmelCase = UP_NUM_TO_LAYER[layer_num] _UpperCAmelCase = F'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: _UpperCAmelCase = DEPTH_0_TO_LAYER[layer_num] _UpperCAmelCase = F'''up_blocks.{max_depth - 1}''' if int(_lowerCAmelCase ) > 3 else "down_blocks.0" if not string_left.startswith("." ): raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' ) _UpperCAmelCase = string_left[1:] if "resnets" in new_layer: _UpperCAmelCase = convert_resconv_naming(_lowerCAmelCase ) elif "attentions" in new_layer: _UpperCAmelCase = convert_attn_naming(_lowerCAmelCase ) _UpperCAmelCase = new_string_left if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): _UpperCAmelCase = prefix + "." + new_layer + "." + string_left else: _UpperCAmelCase = [prefix + "." + new_layer + "." + s for s in string_left] return new_string def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]: _UpperCAmelCase = {} for k, v in state_dict.items(): if k.endswith("kernel" ): # up- and downsample layers, don't have trainable weights continue _UpperCAmelCase = rename(_lowerCAmelCase ) # check if we need to transform from Conv => Linear for attention if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _UpperCAmelCase = transform_conv_attns(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: _UpperCAmelCase = v return new_state_dict def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: if len(_lowerCAmelCase ) == 1: if len(v.shape ) == 3: # weight _UpperCAmelCase = v[:, :, 0] else: # bias _UpperCAmelCase = v else: # qkv matrices _UpperCAmelCase = v.shape[0] _UpperCAmelCase = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: _UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0] else: _UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple: _UpperCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) _UpperCAmelCase = args.model_path.split("/" )[-1].split("." 
)[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' _UpperCAmelCase = download(_lowerCAmelCase ) _UpperCAmelCase = MODELS_MAP[model_name]["sample_rate"] _UpperCAmelCase = MODELS_MAP[model_name]["sample_size"] _UpperCAmelCase = Object() _UpperCAmelCase = sample_size _UpperCAmelCase = sample_rate _UpperCAmelCase = 0 _UpperCAmelCase = UNetaDModel(sample_size=_lowerCAmelCase , sample_rate=_lowerCAmelCase ) _UpperCAmelCase = diffusers_model.state_dict() _UpperCAmelCase = DiffusionUncond(_lowerCAmelCase ) orig_model.load_state_dict(torch.load(args.model_path , map_location=_lowerCAmelCase )["state_dict"] ) _UpperCAmelCase = orig_model.diffusion_ema.eval() _UpperCAmelCase = orig_model.state_dict() _UpperCAmelCase = rename_orig_weights(_lowerCAmelCase ) _UpperCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) _UpperCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(_lowerCAmelCase ) == 0, F'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("kernel" ) for k in list(_lowerCAmelCase ) ), F'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}''' if key == "time_proj.weight": _UpperCAmelCase = value.squeeze() _UpperCAmelCase = value diffusers_model.load_state_dict(_lowerCAmelCase ) _UpperCAmelCase = 100 _UpperCAmelCase = 33 _UpperCAmelCase = IPNDMScheduler(num_train_timesteps=_lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(_lowerCAmelCase ) _UpperCAmelCase = torch.randn([1, 2, config.sample_size] , generator=_lowerCAmelCase ).to(_lowerCAmelCase ) _UpperCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=_lowerCAmelCase )[:-1] _UpperCAmelCase = get_crash_schedule(_lowerCAmelCase ) _UpperCAmelCase = DanceDiffusionPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(33 ) _UpperCAmelCase = pipe(num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase ).audios _UpperCAmelCase = sampling.iplms_sample(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {} ) _UpperCAmelCase = generated.clamp(-1 , 1 ) _UpperCAmelCase = (generated - audio).abs().sum() _UpperCAmelCase = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("Diff sum" , _lowerCAmelCase ) print("Diff max" , _lowerCAmelCase ) assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/''' print(F'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") __lowerCAmelCase = parser.parse_args() main(args)
684
1
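The conversion script in this row is, at its core, systematic key renaming between two checkpoint layouts plus a load-and-verify pass. The renaming pattern, reduced to a sketch; the mapping entries below are illustrative placeholders, not the script's real tables:

# Illustrative prefix map; the real script builds this from several layer tables.
RENAME_MAP = {
    "net.time_embed": "time_proj",
    "net.down.0": "down_blocks.0.resnets.0",
}

def rename_key(key: str) -> str:
    """Rewrite an old checkpoint key to the new layout via longest-prefix match."""
    for old, new in sorted(RENAME_MAP.items(), key=lambda kv: -len(kv[0])):
        if key.startswith(old):
            return new + key[len(old):]
    return key

old_state = {"net.time_embed.weight": 1, "net.down.0.main.0.bias": 2}
new_state = {rename_key(k): v for k, v in old_state.items()}
print(new_state)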
import math from datetime import datetime, timedelta def __lowerCamelCase ( _lowerCAmelCase ) -> datetime: _UpperCAmelCase = year % 19 _UpperCAmelCase = year % 4 _UpperCAmelCase = year % 7 _UpperCAmelCase = math.floor(year / 100 ) _UpperCAmelCase = math.floor((13 + 8 * leap_day_inhibits) / 25 ) _UpperCAmelCase = leap_day_inhibits / 4 _UpperCAmelCase = ( 15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number ) % 30 _UpperCAmelCase = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7 # days to be added to March 21 _UpperCAmelCase = (19 * metonic_cycle + secular_moon_shift) % 30 # PHM -> Paschal Full Moon _UpperCAmelCase = ( 2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point ) % 7 if days_to_add == 29 and days_from_phm_to_sunday == 6: return datetime(_lowerCAmelCase , 4 , 19 ) elif days_to_add == 28 and days_from_phm_to_sunday == 6: return datetime(_lowerCAmelCase , 4 , 18 ) else: return datetime(_lowerCAmelCase , 3 , 22 ) + timedelta( days=int(days_to_add + days_from_phm_to_sunday ) ) if __name__ == "__main__": for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3): __lowerCAmelCase = "will be" if year > datetime.now().year else "was" print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
684
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 __lowerCAmelCase = get_tests_dir("fixtures") class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCAmelCase__ ( self : Dict ): # A mock response for an HTTP head request to emulate server down _UpperCAmelCase = mock.Mock() _UpperCAmelCase = 500 _UpperCAmelCase = {} _UpperCAmelCase = HTTPError _UpperCAmelCase = {} # Download this model to make sure it's in the cache. _UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head: _UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" ) # This check we did call the fake head request mock_head.assert_called() def UpperCAmelCase__ ( self : List[Any] ): # This test is for deprecated behavior and can be removed in v5 _UpperCAmelCase = ViTImageProcessor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" ) def UpperCAmelCase__ ( self : Dict ): with self.assertRaises(__UpperCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder _UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" ) _UpperCAmelCase = AutoImageProcessor.from_pretrained( "hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" ) self.assertIsNotNone(__UpperCamelCase ) @is_staging_test class __SCREAMING_SNAKE_CASE ( unittest.TestCase): @classmethod def UpperCAmelCase__ ( cls : str ): _UpperCAmelCase = TOKEN HfFolder.save_token(__UpperCamelCase ) @classmethod def UpperCAmelCase__ ( cls : Optional[Any] ): try: delete_repo(token=cls._token , repo_id="test-image-processor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" ) except HTTPError: pass def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase ) image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="test-image-processor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __UpperCamelCase , repo_id="test-image-processor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = 
ViTImageProcessor.from_pretrained(__UpperCamelCase ) image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-image-processor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) def UpperCAmelCase__ ( self : int ): CustomImageProcessor.register_for_auto_class() _UpperCAmelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase ) image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , ) _UpperCAmelCase = AutoImageProcessor.from_pretrained( F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__UpperCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
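For reference, a minimal sketch of the round-trip these tests exercise; the repo name, token, and namespace below are placeholders, not values from the tests:

from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
processor.push_to_hub("my-test-image-processor", use_auth_token="hf_xxx")  # placeholder repo name and token
reloaded = ViTImageProcessor.from_pretrained("your-username/my-test-image-processor")  # placeholder namespace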
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    # BERT-style configuration with additional pruning hyper-parameters
    # (pruning_method, mask_init, mask_scale).
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
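A minimal sketch of instantiating this config with the pruning-specific arguments it adds on top of the standard BERT fields (the values here are illustrative defaults, not recommendations):

config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)
print(config.hidden_size, config.pruning_method)  # 768 topK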
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
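The operator-based helpers above let the same operation tuples drive both a built-in dict and the HashMap under test. Used directly, the map behaves like a dict:

hm = HashMap(initial_block_size=4)
hm["key_a"] = "val_a"
assert hm["key_a"] == "val_a"
del hm["key_a"]
assert len(hm) == 0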
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression of integers."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division that truncates toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
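For example (the second case exercises the truncate-toward-zero division branch):

assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9  # (2 + 1) * 3
assert evaluate_postfix(["7", "-2", "/"]) == -3          # truncates toward zero, not floor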
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place using binary insertion sort and return it."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        # binary search for the insertion position
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1

        # shift the larger elements one slot to the right
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
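For example:

assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
assert binary_insertion_sort([]) == []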
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __lowerCAmelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt") __lowerCAmelCase = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) __lowerCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[Any]: with open(_lowerCAmelCase , "rb" ) as f: _UpperCAmelCase = Image.open(_lowerCAmelCase ) return im.convert("RGB" ) @dataclass class __SCREAMING_SNAKE_CASE : __SCREAMING_SNAKE_CASE : Optional[str] = field( default=lowercase , metadata={ """help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).""" } , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}) __SCREAMING_SNAKE_CASE : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the training data."""}) __SCREAMING_SNAKE_CASE : Optional[str] = field(default=lowercase , metadata={"""help""": """A folder containing the validation data."""}) __SCREAMING_SNAKE_CASE : Optional[float] = field( default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""}) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) __SCREAMING_SNAKE_CASE : Optional[int] = field( default=lowercase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def UpperCAmelCase__ ( self : List[str] ): if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( "You must specify either a dataset name from the hub or a train and/or validation directory." 
) @dataclass class __SCREAMING_SNAKE_CASE : __SCREAMING_SNAKE_CASE : str = field( default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=lowercase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(lowercase)} , ) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) __SCREAMING_SNAKE_CASE : Optional[str] = field( default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""}) __SCREAMING_SNAKE_CASE : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) __SCREAMING_SNAKE_CASE : str = field(default=lowercase , metadata={"""help""": """Name or path of preprocessor config."""}) __SCREAMING_SNAKE_CASE : bool = field( default=lowercase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) __SCREAMING_SNAKE_CASE : bool = field( default=lowercase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]: _UpperCAmelCase = torch.stack([example["pixel_values"] for example in examples] ) _UpperCAmelCase = torch.tensor([example["labels"] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def __lowerCamelCase ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_image_classification" , _lowerCAmelCase , _lowerCAmelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_lowerCAmelCase ) transformers.utils.logging.set_verbosity(_lowerCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. if data_args.dataset_name is not None: _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , ) else: _UpperCAmelCase = {} if data_args.train_dir is not None: _UpperCAmelCase = os.path.join(data_args.train_dir , "**" ) if data_args.validation_dir is not None: _UpperCAmelCase = os.path.join(data_args.validation_dir , "**" ) _UpperCAmelCase = load_dataset( "imagefolder" , data_files=_lowerCAmelCase , cache_dir=model_args.cache_dir , task="image-classification" , ) # If we don't have a validation split, split off a percentage of train as validation. _UpperCAmelCase = None if "validation" in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _lowerCAmelCase ) and data_args.train_val_split > 0.0: _UpperCAmelCase = dataset["train"].train_test_split(data_args.train_val_split ) _UpperCAmelCase = split["train"] _UpperCAmelCase = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _UpperCAmelCase = dataset["train"].features["labels"].names _UpperCAmelCase , _UpperCAmelCase = {}, {} for i, label in enumerate(_lowerCAmelCase ): _UpperCAmelCase = str(_lowerCAmelCase ) _UpperCAmelCase = label # Load the accuracy metric from the datasets package _UpperCAmelCase = evaluate.load("accuracy" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. 
def compute_metrics(_lowerCAmelCase ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) _UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowerCAmelCase ) , labelaid=_lowerCAmelCase , idalabel=_lowerCAmelCase , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) _UpperCAmelCase = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. if "shortest_edge" in image_processor.size: _UpperCAmelCase = image_processor.size["shortest_edge"] else: _UpperCAmelCase = (image_processor.size["height"], image_processor.size["width"]) _UpperCAmelCase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) _UpperCAmelCase = Compose( [ RandomResizedCrop(_lowerCAmelCase ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) _UpperCAmelCase = Compose( [ Resize(_lowerCAmelCase ), CenterCrop(_lowerCAmelCase ), ToTensor(), normalize, ] ) def train_transforms(_lowerCAmelCase ): _UpperCAmelCase = [ _train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"] ] return example_batch def val_transforms(_lowerCAmelCase ): _UpperCAmelCase = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: _UpperCAmelCase = ( dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(_lowerCAmelCase ) if training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: _UpperCAmelCase = ( dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(_lowerCAmelCase ) # Initalize our trainer _UpperCAmelCase = Trainer( model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , data_collator=_lowerCAmelCase , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_lowerCAmelCase ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # 
Evaluation if training_args.do_eval: _UpperCAmelCase = trainer.evaluate() trainer.log_metrics("eval" , _lowerCAmelCase ) trainer.save_metrics("eval" , _lowerCAmelCase ) # Write model card and (optionally) push to hub _UpperCAmelCase = { "finetuned_from": model_args.model_name_or_path, "tasks": "image-classification", "dataset": data_args.dataset_name, "tags": ["image-classification", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**_lowerCAmelCase ) else: trainer.create_model_card(**_lowerCAmelCase ) if __name__ == "__main__": main()
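As a side note on the data path: the collate function near the top of this script (named collate_fn upstream; its name was mangled by the renaming pass here) simply stacks pixel tensors and wraps the integer labels. A self-contained sketch of that logic:

import torch

examples = [{"pixel_values": torch.rand(3, 224, 224), "labels": 0} for _ in range(2)]
batch = {
    "pixel_values": torch.stack([ex["pixel_values"] for ex in examples]),
    "labels": torch.tensor([ex["labels"] for ex in examples]),
}
assert batch["pixel_values"].shape == (2, 3, 224, 224)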
# Number of characters in the alphabet, used as the hash base
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using a rolling polynomial hash."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
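A worked micro-example of the rolling-hash update used above: sliding the window of "abc" from "ab" to "bc" yields exactly the hash computed from scratch (for a length-2 pattern, modulus_power ends up as 256):

alphabet_size, modulus = 256, 1000003
h_ab = (ord("b") + ord("a") * alphabet_size) % modulus
h_bc_rolled = ((h_ab - ord("a") * 256) * alphabet_size + ord("c")) % modulus
h_bc_direct = (ord("c") + ord("b") * alphabet_size) % modulus
assert h_bc_rolled == h_bc_direct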
import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow __lowerCAmelCase = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ "text-classification", "language-modeling", "summarization", "token-classification", "question-answering", ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) __lowerCAmelCase = logging.getLogger() def __lowerCamelCase ( ) -> Optional[Any]: _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument("-f" ) _UpperCAmelCase = parser.parse_args() return args.f def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase="eval" ) -> Optional[Any]: _UpperCAmelCase = os.path.join(_lowerCAmelCase , F'''{split}_results.json''' ) if os.path.exists(_lowerCAmelCase ): with open(_lowerCAmelCase , "r" ) as f: return json.load(_lowerCAmelCase ) raise ValueError(F'''can\'t find {path}''' ) __lowerCAmelCase = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __SCREAMING_SNAKE_CASE ( lowercase): def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase = self.get_auto_remove_tmp_dir() _UpperCAmelCase = F''' run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 '''.split() with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ): run_flax_glue.main() _UpperCAmelCase = get_results(__UpperCamelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) @slow def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = self.get_auto_remove_tmp_dir() _UpperCAmelCase = F''' run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir '''.split() with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ): run_clm_flax.main() _UpperCAmelCase = get_results(__UpperCamelCase ) self.assertLess(result["eval_perplexity"] , 100 ) @slow def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = self.get_auto_remove_tmp_dir() _UpperCAmelCase = F''' run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate '''.split() with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ): run_summarization_flax.main() _UpperCAmelCase = get_results(__UpperCamelCase , split="test" ) self.assertGreaterEqual(result["test_rouge1"] , 10 ) self.assertGreaterEqual(result["test_rouge2"] , 2 ) self.assertGreaterEqual(result["test_rougeL"] , 7 ) self.assertGreaterEqual(result["test_rougeLsum"] , 7 
) @slow def UpperCAmelCase__ ( self : int ): _UpperCAmelCase = self.get_auto_remove_tmp_dir() _UpperCAmelCase = F''' run_mlm.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 '''.split() with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ): run_mlm_flax.main() _UpperCAmelCase = get_results(__UpperCamelCase ) self.assertLess(result["eval_perplexity"] , 42 ) @slow def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = self.get_auto_remove_tmp_dir() _UpperCAmelCase = F''' run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir '''.split() with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ): run_ta_mlm_flax.main() _UpperCAmelCase = get_results(__UpperCamelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.42 ) @slow def UpperCAmelCase__ ( self : Optional[Any] ): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu _UpperCAmelCase = 7 if get_gpu_count() > 1 else 2 _UpperCAmelCase = self.get_auto_remove_tmp_dir() _UpperCAmelCase = F''' run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 '''.split() with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ): run_flax_ner.main() _UpperCAmelCase = get_results(__UpperCamelCase ) self.assertGreaterEqual(result["eval_accuracy"] , 0.75 ) self.assertGreaterEqual(result["eval_f1"] , 0.3 ) @slow def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = self.get_auto_remove_tmp_dir() _UpperCAmelCase = F''' run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 '''.split() with patch.object(__UpperCamelCase , "argv" , __UpperCamelCase ): run_qa.main() _UpperCAmelCase = get_results(__UpperCamelCase ) self.assertGreaterEqual(result["eval_f1"] , 30 ) self.assertGreaterEqual(result["eval_exact"] , 30 )
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch __lowerCAmelCase = random.Random() def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]: if rng is None: _UpperCAmelCase = global_rng _UpperCAmelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def __init__( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Union[str, Any]=400 , __UpperCamelCase : List[Any]=2_000 , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : Optional[int]=160 , __UpperCamelCase : Any=8 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Dict=4_000 , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Tuple=True , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = min_seq_length _UpperCAmelCase = max_seq_length _UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _UpperCAmelCase = padding_value _UpperCAmelCase = sampling_rate _UpperCAmelCase = return_attention_mask _UpperCAmelCase = do_normalize _UpperCAmelCase = feature_size _UpperCAmelCase = chunk_length _UpperCAmelCase = hop_length def UpperCAmelCase__ ( self : Optional[Any] ): return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Tuple=False , __UpperCamelCase : Dict=False ): def _flatten(__UpperCamelCase : Any ): return list(itertools.chain(*__UpperCamelCase ) ) if equal_length: _UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _UpperCAmelCase = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : str = WhisperFeatureExtractor if is_speech_available() else None def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = WhisperFeatureExtractionTester(self ) def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = feat_extract_first.save_pretrained(__UpperCamelCase )[0] check_json_file_has_correct_format(__UpperCamelCase ) _UpperCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCamelCase ) _UpperCAmelCase = feat_extract_first.to_dict() _UpperCAmelCase = feat_extract_second.to_dict() _UpperCAmelCase = 
feat_extract_first.mel_filters _UpperCAmelCase = feat_extract_second.mel_filters self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(__UpperCamelCase , "feat_extract.json" ) feat_extract_first.to_json_file(__UpperCamelCase ) _UpperCAmelCase = self.feature_extraction_class.from_json_file(__UpperCamelCase ) _UpperCAmelCase = feat_extract_first.to_dict() _UpperCAmelCase = feat_extract_second.to_dict() _UpperCAmelCase = feat_extract_first.mel_filters _UpperCAmelCase = feat_extract_second.mel_filters self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : int ): # Tests that all call wrap to encode_plus and batch_encode_plus _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs] # Test feature size _UpperCAmelCase = feature_extractor(__UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input _UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) # Test batched _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)] _UpperCAmelCase = np.asarray(__UpperCamelCase ) _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) # Test truncation required _UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs] _UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs] _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs_truncated] _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) def UpperCAmelCase__ ( self : Union[str, Any] ): import torch _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa ) _UpperCAmelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) _UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Tuple ): _UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech _UpperCAmelCase = ds.sort("id" ).select(range(__UpperCamelCase ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def UpperCAmelCase__ ( self : Tuple ): # fmt: off _UpperCAmelCase = torch.tensor( [ 0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951, 0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678, 0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554, -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854 ] ) # fmt: on _UpperCAmelCase = self._load_datasamples(1 ) _UpperCAmelCase = WhisperFeatureExtractor() _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_features self.assertEqual(input_features.shape , (1, 80, 3_000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , __UpperCamelCase , atol=1e-4 ) ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase = self._load_datasamples(1 )[0] _UpperCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue _UpperCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__UpperCamelCase )[0] self.assertTrue(np.all(np.mean(__UpperCamelCase ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase ) - 1 ) < 1e-3 ) )
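The integration check at the end pins the canonical Whisper feature shape; the same shape falls out of a plain call with the extractor's defaults (a sketch, no model download required: 80 mel bins over 3000 frames, i.e. 30 s at hop length 160):

import numpy as np
from transformers import WhisperFeatureExtractor

fe = WhisperFeatureExtractor()
feats = fe(np.zeros(16000, dtype=np.float32), sampling_rate=16000, return_tensors="np").input_features
assert feats.shape == (1, 80, 3000)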
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
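Instantiated with its defaults, the config mirrors the released 2.7B checkpoint's dimensions:

config = GPTNeoXJapaneseConfig()
print(config.hidden_size, config.num_hidden_layers)  # 2560 32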
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file


# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Download and cache the prompt template from a repo, or pass a literal prompt through."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
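A minimal usage sketch: passing None falls back to the default prompts dataset, and the agent name is only recorded in the request's user agent (the name "MyAgent" is a placeholder):

run_template = download_prompt(None, agent_name="MyAgent", mode="run")
chat_template = download_prompt(None, agent_name="MyAgent", mode="chat")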
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split sentences onto separate lines, as needed to match published ROUGE-Lsum scores."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
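For example (assumes the punkt tokenizer downloaded above):

print(add_newline_to_end_of_each_sentence("Hello world. How are you?"))
# Hello world.
# How are you?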
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the substring-divisibility property of Project Euler problem 43."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Return the sum of all 0-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
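The well-known pandigital 1406357289 from the problem statement passes every divisibility test:

assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))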
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_mt5_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __lowerCAmelCase = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __lowerCAmelCase = {"facebook/blenderbot-3B": 1_2_8} class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : List[Any] = ["""input_ids""", """attention_mask"""] __SCREAMING_SNAKE_CASE : List[str] = BlenderbotTokenizer def __init__( self : Tuple , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : Tuple="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Dict="</s>" , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[str]=True , **__UpperCamelCase : int , ): super().__init__( __UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , ) _UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space: _UpperCAmelCase = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) ) _UpperCAmelCase = add_prefix_space _UpperCAmelCase = pre_tok_class(**__UpperCamelCase ) _UpperCAmelCase = add_prefix_space _UpperCAmelCase = "post_processor" _UpperCAmelCase = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase ) if tokenizer_component_instance: _UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _UpperCAmelCase = tuple(state["sep"] ) if "cls" in state: _UpperCAmelCase = tuple(state["cls"] ) _UpperCAmelCase = False if state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space: _UpperCAmelCase = add_prefix_space _UpperCAmelCase = True if state.get("trim_offsets" , __UpperCamelCase ) != trim_offsets: _UpperCAmelCase = trim_offsets _UpperCAmelCase = True if changes_to_apply: _UpperCAmelCase = getattr(__UpperCamelCase , state.pop("type" ) ) 
_UpperCAmelCase = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def UpperCAmelCase__ ( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ): _UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value _UpperCAmelCase = value def UpperCAmelCase__ ( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] ): _UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase ) def UpperCAmelCase__ ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ): _UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase ) def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ): _UpperCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ): _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : "Conversation" ): _UpperCAmelCase = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) _UpperCAmelCase = " ".join(__UpperCamelCase ) _UpperCAmelCase = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: _UpperCAmelCase = input_ids[-self.model_max_length :] logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
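A short usage sketch of the fast tokenizer above (a sketch only: it assumes the facebook/blenderbot-3B files can be downloaded; Blenderbot expects a leading space on user inputs, as the conversation builder shows):

from transformers import BlenderbotTokenizerFast

tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
ids = tok(" Hello, how are you?").input_ids  # ends with the EOS token per build_inputs_with_special_tokens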
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return a word's sorted-letter signature, e.g. signature("test") == "estt"."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the word list that shares my_word's signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}

    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
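For instance, two words are anagrams exactly when their signatures match:

assert signature("listen") == signature("silent") == "eilnst"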
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __lowerCAmelCase = logging.get_logger(__name__) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase ) _UpperCAmelCase = downstream_dict["projector.weight"] _UpperCAmelCase = downstream_dict["projector.bias"] _UpperCAmelCase = downstream_dict["model.post_net.linear.weight"] _UpperCAmelCase = downstream_dict["model.post_net.linear.bias"] return model def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str: _UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase ) _UpperCAmelCase = downstream_dict["model.linear.weight"] _UpperCAmelCase = downstream_dict["model.linear.bias"] return model def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: _UpperCAmelCase = WavaVecaForXVector.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase ) _UpperCAmelCase = downstream_dict["connector.weight"] _UpperCAmelCase = downstream_dict["connector.bias"] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): _UpperCAmelCase = downstream_dict[ F'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] _UpperCAmelCase = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] _UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"] _UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"] _UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"] _UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"] _UpperCAmelCase = downstream_dict["objective.W"] return model @torch.no_grad() def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: _UpperCAmelCase = torch.load(_lowerCAmelCase , map_location="cpu" ) _UpperCAmelCase = checkpoint["Downstream"] _UpperCAmelCase = WavaVecaConfig.from_pretrained(_lowerCAmelCase ) _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained( _lowerCAmelCase , return_attention_mask=_lowerCAmelCase , do_normalize=_lowerCAmelCase ) _UpperCAmelCase = hf_config.architectures[0] if arch.endswith("ForSequenceClassification" ): _UpperCAmelCase = convert_classification(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) elif arch.endswith("ForAudioFrameClassification" ): _UpperCAmelCase = convert_diarization(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) elif arch.endswith("ForXVector" ): _UpperCAmelCase = convert_xvector(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: _UpperCAmelCase = checkpoint["Featurizer"]["weights"] hf_feature_extractor.save_pretrained(_lowerCAmelCase ) hf_model.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." 
) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") __lowerCAmelCase = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
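Invoked programmatically rather than via the CLI, the conversion entry point (named convert_saprl_checkpoint upstream; the definition's name was mangled by the renaming pass here) takes the same four arguments positionally. All paths and names below are placeholders:

convert_saprl_checkpoint(
    "facebook/wav2vec2-base",     # base_model_name (placeholder)
    "./classifier_config.json",   # config_path (placeholder)
    "./s3prl_downstream.ckpt",    # checkpoint_path (placeholder)
    "./converted_model",          # model_dump_path (placeholder)
)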
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y, x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
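A note on the queue above: list.pop(0) is O(n), while the idiomatic constant-time queue in Python is collections.deque. A minimal illustrative sketch, not part of the original sample:

from collections import deque

queue = deque([(0, 0)])
queue.append((0, 1))       # enqueue at the right
first = queue.popleft()    # dequeue from the left in O(1) instead of list.pop(0)'s O(n)
assert first == (0, 0)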
684
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set("([{")
    closed_brackets = set(")]}")
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for char in s:
        if char in open_brackets:
            stack.append(char)
        elif char in closed_brackets and (
            len(stack) == 0 or open_to_closed[stack.pop()] != char
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
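A quick sanity check for the checker above, a sketch assuming the cleaned-up is_balanced name:

assert is_balanced("([{}])") is True
assert is_balanced("([)]") is False   # closes out of order
assert is_balanced("((") is False     # unclosed bracket remains on the stack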
684
1
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL __lowerCAmelCase = logging.get_logger(__name__) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple[int, int]: def constraint_to_multiple_of(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=0 , _lowerCAmelCase=None ): _UpperCAmelCase = round(val / multiple ) * multiple if max_val is not None and x > max_val: _UpperCAmelCase = math.floor(val / multiple ) * multiple if x < min_val: _UpperCAmelCase = math.ceil(val / multiple ) * multiple return x _UpperCAmelCase = (output_size, output_size) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else output_size _UpperCAmelCase , _UpperCAmelCase = get_image_size(_lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = output_size # determine new height and width _UpperCAmelCase = output_height / input_height _UpperCAmelCase = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _UpperCAmelCase = scale_width else: # fit height _UpperCAmelCase = scale_height _UpperCAmelCase = constraint_to_multiple_of(scale_height * input_height , multiple=_lowerCAmelCase ) _UpperCAmelCase = constraint_to_multiple_of(scale_width * input_width , multiple=_lowerCAmelCase ) return (new_height, new_width) class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : str = ["""pixel_values"""] def __init__( self : Any , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = False , __UpperCamelCase : int = 1 , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 255 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : Tuple , ): super().__init__(**__UpperCamelCase ) _UpperCAmelCase = size if size is not None else {"height": 384, "width": 384} _UpperCAmelCase = get_size_dict(__UpperCamelCase ) _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = keep_aspect_ratio _UpperCAmelCase = ensure_multiple_of _UpperCAmelCase = resample _UpperCAmelCase = do_rescale _UpperCAmelCase = rescale_factor _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : bool = False , __UpperCamelCase : int = 1 , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Optional[int] , ): _UpperCAmelCase = get_size_dict(__UpperCamelCase ) if "height" not in size or "width" not in size: raise 
ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) _UpperCAmelCase = get_resize_output_image_size( __UpperCamelCase , output_size=(size["height"], size["width"]) , keep_aspect_ratio=__UpperCamelCase , multiple=__UpperCamelCase , ) return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[int, float] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[str] , ): return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Union[str, Any] , ): return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : ImageInput , __UpperCamelCase : bool = None , __UpperCamelCase : int = None , __UpperCamelCase : bool = None , __UpperCamelCase : int = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : float = None , __UpperCamelCase : bool = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **__UpperCamelCase : str , ): _UpperCAmelCase = do_resize if do_resize is not None else self.do_resize _UpperCAmelCase = size if size is not None else self.size _UpperCAmelCase = get_size_dict(__UpperCamelCase ) _UpperCAmelCase = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _UpperCAmelCase = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _UpperCAmelCase = resample if resample is not None else self.resample _UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCAmelCase = image_mean if image_mean is not None else self.image_mean _UpperCAmelCase = image_std if image_std is not None else self.image_std _UpperCAmelCase = make_list_of_images(__UpperCamelCase ) if not valid_images(__UpperCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
_UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images] if do_resize: _UpperCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images] if do_rescale: _UpperCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images] if do_normalize: _UpperCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images] _UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images] _UpperCAmelCase = {"pixel_values": images} return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase ) def UpperCAmelCase__ ( self : Any , __UpperCamelCase : List[str] , __UpperCamelCase : List[Tuple] = None ): _UpperCAmelCase = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__UpperCamelCase ) != len(__UpperCamelCase ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(__UpperCamelCase ): _UpperCAmelCase = target_sizes.numpy() _UpperCAmelCase = [] for idx in range(len(__UpperCamelCase ) ): _UpperCAmelCase = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=__UpperCamelCase ) _UpperCAmelCase = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__UpperCamelCase ) else: _UpperCAmelCase = logits.argmax(dim=1 ) _UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
684
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution (x = 0, y = 0)
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-trivial solution (consistent system)
    return (x, y)
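Each equation is passed as [a, b, c] for a*x + b*y = c; a worked check of the solver above, assuming the restored cramers_rule_2x2 name:

# Solve  2x + 3y = 7  and  x - y = 1:
# determinant = 2*(-1) - 1*3 = -5, determinant_x = 7*(-1) - 1*3 = -10,
# determinant_y = 2*1 - 1*7 = -5, so x = 2.0 and y = 1.0.
print(cramers_rule_2x2([2, 3, 7], [1, -1, 1]))  # (2.0, 1.0)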
684
1
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
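The recursion above counts UK coin combinations (Project Euler problem 31); a quick check of a small value and the known answer, assuming the restored function names:

print(solution(5))    # 4 ways to make 5p from {1p, 2p, 5p}: 5, 2+2+1, 2+1+1+1, 1x5
print(solution(200))  # 73682, the published answer for problem 31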
684
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
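A hypothetical invocation of the conversion script above, using the three argparse flags it defines (all paths are placeholders):

# python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./rembert/model.ckpt \
#     --rembert_config_file ./rembert/config.json \
#     --pytorch_dump_path ./rembert/pytorch_model.bin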
684
1
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def __lowerCamelCase ( _lowerCAmelCase ) -> Any: # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def __lowerCamelCase ( _lowerCAmelCase ) -> List[Any]: # word like '180' or '身高' or '神' for char in word: _UpperCAmelCase = ord(_lowerCAmelCase ) if not _is_chinese_char(_lowerCAmelCase ): return 0 return 1 def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]: _UpperCAmelCase = set() for token in tokens: _UpperCAmelCase = len(_lowerCAmelCase ) > 1 and is_chinese(_lowerCAmelCase ) if chinese_word: word_set.add(_lowerCAmelCase ) _UpperCAmelCase = list(_lowerCAmelCase ) return word_list def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> str: if not chinese_word_set: return bert_tokens _UpperCAmelCase = max([len(_lowerCAmelCase ) for w in chinese_word_set] ) _UpperCAmelCase = bert_tokens _UpperCAmelCase , _UpperCAmelCase = 0, len(_lowerCAmelCase ) while start < end: _UpperCAmelCase = True if is_chinese(bert_word[start] ): _UpperCAmelCase = min(end - start , _lowerCAmelCase ) for i in range(_lowerCAmelCase , 1 , -1 ): _UpperCAmelCase = "".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): _UpperCAmelCase = "##" + bert_word[j] _UpperCAmelCase = start + i _UpperCAmelCase = False break if single_word: start += 1 return bert_word def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: _UpperCAmelCase = [] for i in range(0 , len(_lowerCAmelCase ) , 100 ): _UpperCAmelCase = ltp_tokenizer.seg(lines[i : i + 100] )[0] _UpperCAmelCase = [get_chinese_word(_lowerCAmelCase ) for r in res] ltp_res.extend(_lowerCAmelCase ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) _UpperCAmelCase = [] for i in range(0 , len(_lowerCAmelCase ) , 100 ): _UpperCAmelCase = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_lowerCAmelCase , truncation=_lowerCAmelCase , max_length=512 ) bert_res.extend(res["input_ids"] ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) _UpperCAmelCase = [] for input_ids, chinese_word in zip(_lowerCAmelCase , _lowerCAmelCase ): _UpperCAmelCase = [] for id in input_ids: _UpperCAmelCase = bert_tokenizer._convert_id_to_token(_lowerCAmelCase ) input_tokens.append(_lowerCAmelCase ) _UpperCAmelCase = add_sub_symbol(_lowerCAmelCase , _lowerCAmelCase ) _UpperCAmelCase = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_lowerCAmelCase ): if token[:2] == "##": _UpperCAmelCase = token[2:] # save chinese tokens' pos if len(_lowerCAmelCase ) == 1 and _is_chinese_char(ord(_lowerCAmelCase ) ): ref_id.append(_lowerCAmelCase ) ref_ids.append(_lowerCAmelCase ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) return ref_ids def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple: # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , "r" , encoding="utf-8" ) as f: _UpperCAmelCase = f.readlines() _UpperCAmelCase = [line.strip() for line in data if len(_lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' _UpperCAmelCase = LTP(args.ltp ) # faster in GPU device _UpperCAmelCase = BertTokenizer.from_pretrained(args.bert ) _UpperCAmelCase = prepare_ref(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) with open(args.save_path , "w" , encoding="utf-8" ) as f: _UpperCAmelCase = [json.dumps(_lowerCAmelCase ) + "\n" for ref in ref_ids] f.writelines(_lowerCAmelCase ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path" ) parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer") parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res") __lowerCAmelCase = parser.parse_args() main(args)
684
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : @staticmethod def UpperCAmelCase__ ( *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ): pass @is_pipeline_test @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase): __SCREAMING_SNAKE_CASE : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ): _UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) _UpperCAmelCase = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ): _UpperCAmelCase = vqa_pipeline(__UpperCamelCase , top_k=1 ) self.assertEqual( __UpperCamelCase , [ [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}], [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}], ] , ) @require_torch def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) _UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png" _UpperCAmelCase = "How many cats are there?" _UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question="How many cats are there?" , top_k=2 ) self.assertEqual( __UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] ) _UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( __UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] ) @slow @require_torch def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" ) _UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png" _UpperCAmelCase = "How many cats are there?" _UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) _UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) _UpperCAmelCase = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , ) @require_tf @unittest.skip("Visual question answering not implemented in TF" ) def UpperCAmelCase__ ( self : Optional[int] ): pass
684
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = "▁" __lowerCAmelCase = {"vocab_file": "sentencepiece.bpe.model"} __lowerCAmelCase = { "vocab_file": { "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model", } } __lowerCAmelCase = { "facebook/xglm-564M": 2_0_4_8, } class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : int = ["""input_ids""", """attention_mask"""] def __init__( self : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : int="<s>" , __UpperCamelCase : Dict="</s>" , __UpperCamelCase : List[Any]="</s>" , __UpperCamelCase : List[str]="<s>" , __UpperCamelCase : str="<unk>" , __UpperCamelCase : Dict="<pad>" , __UpperCamelCase : Optional[Dict[str, Any]] = None , **__UpperCamelCase : List[Any] , ): _UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer _UpperCAmelCase = 7 _UpperCAmelCase = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )] _UpperCAmelCase = kwargs.get("additional_special_tokens" , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , ) _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCamelCase ) ) _UpperCAmelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _UpperCAmelCase = 1 # Mimic fairseq token-to-id alignment for the first 4 token _UpperCAmelCase = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} _UpperCAmelCase = len(self.sp_model ) _UpperCAmelCase = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(__UpperCamelCase ) _UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Any ): _UpperCAmelCase = self.__dict__.copy() _UpperCAmelCase = None _UpperCAmelCase = self.sp_model.serialized_model_proto() return state def __setstate__( self : List[Any] , __UpperCamelCase : Optional[int] ): _UpperCAmelCase = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _UpperCAmelCase = {} _UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ): if token_ids_a is None: return [self.sep_token_id] + token_ids_a _UpperCAmelCase = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def UpperCAmelCase__ ( self : Any , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCamelCase )) return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) def UpperCAmelCase__ ( self : str , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ): _UpperCAmelCase = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def UpperCAmelCase__ ( self : Any ): return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : str ): return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Dict ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _UpperCAmelCase = self.sp_model.PieceToId(__UpperCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Optional[Any] ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase__ ( self : Any , __UpperCamelCase : Any ): _UpperCAmelCase = "".join(__UpperCamelCase ).replace(__UpperCamelCase , " " ).strip() return out_string def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ): if not os.path.isdir(__UpperCamelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _UpperCAmelCase = os.path.join( __UpperCamelCase , 
(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCamelCase , "wb" ) as fi: _UpperCAmelCase = self.sp_model.serialized_model_proto() fi.write(__UpperCamelCase ) return (out_vocab_file,)
684
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
684
1
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel __lowerCAmelCase = { "gwf-440k": { "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 6_5_5_3_6, }, "jmann-small-190k": { "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 6_5_5_3_6, }, "jmann-large-580k": { "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 1_3_1_0_7_2, }, "maestro-uncond-150k": { "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, "unlocked-uncond-250k": { "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, "honk-140k": { "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, } def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: return torch.atana(_lowerCAmelCase , _lowerCAmelCase ) / math.pi * 2 def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]: _UpperCAmelCase = torch.sin(t * math.pi / 2 ) ** 2 _UpperCAmelCase = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(_lowerCAmelCase , _lowerCAmelCase ) class __SCREAMING_SNAKE_CASE ( lowercase): pass class __SCREAMING_SNAKE_CASE ( nn.Module): def __init__( self : str , __UpperCamelCase : Optional[int] ): super().__init__() _UpperCAmelCase = DiffusionAttnUnetaD(__UpperCamelCase , n_attn_layers=4 ) _UpperCAmelCase = deepcopy(self.diffusion ) _UpperCAmelCase = torch.quasirandom.SobolEngine(1 , scramble=__UpperCamelCase ) def __lowerCamelCase ( _lowerCAmelCase ) -> int: _UpperCAmelCase = MODELS_MAP[model_name]["url"] os.system(F'''wget {url} ./''' ) return F'''./{model_name}.ckpt''' __lowerCAmelCase = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", } __lowerCAmelCase = { "8": "resnets.0", "9": "attentions.0", "10": "resnets.1", "11": "attentions.1", "12": "resnets.2", "13": "attentions.2", } __lowerCAmelCase = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", "8": "resnets.3", "9": "attentions.3", "10": "resnets.4", "11": "attentions.4", "12": "resnets.5", "13": "attentions.5", } __lowerCAmelCase = { "0": "resnets.0", "1": "resnets.1", "2": "resnets.2", "4": "resnets.0", "5": "resnets.1", "6": "resnets.2", } __lowerCAmelCase = { "skip": "conv_skip", "main.0": "conv_1", "main.1": "group_norm_1", "main.3": "conv_2", "main.4": "group_norm_2", } __lowerCAmelCase = { "norm": "group_norm", "qkv_proj": ["query", "key", "value"], "out_proj": ["proj_attn"], } def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]: if name.startswith("skip" ): return name.replace("skip" , RES_CONV_MAP["skip"] ) # name has to be of format main.{digit} if not name.startswith("main." 
): raise ValueError(F'''ResConvBlock error with {name}''' ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[Any]: for key, value in ATTN_MAP.items(): if name.startswith(_lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ): return name.replace(_lowerCAmelCase , _lowerCAmelCase ) elif name.startswith(_lowerCAmelCase ): return [name.replace(_lowerCAmelCase , _lowerCAmelCase ) for v in value] raise ValueError(F'''Attn error with {name}''' ) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=13 ) -> List[Any]: _UpperCAmelCase = input_string if string.split("." )[0] == "timestep_embed": return string.replace("timestep_embed" , "time_proj" ) _UpperCAmelCase = 0 if string.startswith("net.3." ): depth += 1 _UpperCAmelCase = string[6:] elif string.startswith("net." ): _UpperCAmelCase = string[4:] while string.startswith("main.7." ): depth += 1 _UpperCAmelCase = string[7:] if string.startswith("main." ): _UpperCAmelCase = string[5:] # mid block if string[:2].isdigit(): _UpperCAmelCase = string[:2] _UpperCAmelCase = string[2:] else: _UpperCAmelCase = string[0] _UpperCAmelCase = string[1:] if depth == max_depth: _UpperCAmelCase = MID_NUM_TO_LAYER[layer_num] _UpperCAmelCase = "mid_block" elif depth > 0 and int(_lowerCAmelCase ) < 7: _UpperCAmelCase = DOWN_NUM_TO_LAYER[layer_num] _UpperCAmelCase = F'''down_blocks.{depth}''' elif depth > 0 and int(_lowerCAmelCase ) > 7: _UpperCAmelCase = UP_NUM_TO_LAYER[layer_num] _UpperCAmelCase = F'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: _UpperCAmelCase = DEPTH_0_TO_LAYER[layer_num] _UpperCAmelCase = F'''up_blocks.{max_depth - 1}''' if int(_lowerCAmelCase ) > 3 else "down_blocks.0" if not string_left.startswith("." ): raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' ) _UpperCAmelCase = string_left[1:] if "resnets" in new_layer: _UpperCAmelCase = convert_resconv_naming(_lowerCAmelCase ) elif "attentions" in new_layer: _UpperCAmelCase = convert_attn_naming(_lowerCAmelCase ) _UpperCAmelCase = new_string_left if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): _UpperCAmelCase = prefix + "." + new_layer + "." + string_left else: _UpperCAmelCase = [prefix + "." + new_layer + "." + s for s in string_left] return new_string def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]: _UpperCAmelCase = {} for k, v in state_dict.items(): if k.endswith("kernel" ): # up- and downsample layers, don't have trainable weights continue _UpperCAmelCase = rename(_lowerCAmelCase ) # check if we need to transform from Conv => Linear for attention if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _UpperCAmelCase = transform_conv_attns(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: _UpperCAmelCase = v return new_state_dict def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: if len(_lowerCAmelCase ) == 1: if len(v.shape ) == 3: # weight _UpperCAmelCase = v[:, :, 0] else: # bias _UpperCAmelCase = v else: # qkv matrices _UpperCAmelCase = v.shape[0] _UpperCAmelCase = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: _UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0] else: _UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple: _UpperCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) _UpperCAmelCase = args.model_path.split("/" )[-1].split("." 
)[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' _UpperCAmelCase = download(_lowerCAmelCase ) _UpperCAmelCase = MODELS_MAP[model_name]["sample_rate"] _UpperCAmelCase = MODELS_MAP[model_name]["sample_size"] _UpperCAmelCase = Object() _UpperCAmelCase = sample_size _UpperCAmelCase = sample_rate _UpperCAmelCase = 0 _UpperCAmelCase = UNetaDModel(sample_size=_lowerCAmelCase , sample_rate=_lowerCAmelCase ) _UpperCAmelCase = diffusers_model.state_dict() _UpperCAmelCase = DiffusionUncond(_lowerCAmelCase ) orig_model.load_state_dict(torch.load(args.model_path , map_location=_lowerCAmelCase )["state_dict"] ) _UpperCAmelCase = orig_model.diffusion_ema.eval() _UpperCAmelCase = orig_model.state_dict() _UpperCAmelCase = rename_orig_weights(_lowerCAmelCase ) _UpperCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) _UpperCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(_lowerCAmelCase ) == 0, F'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("kernel" ) for k in list(_lowerCAmelCase ) ), F'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}''' if key == "time_proj.weight": _UpperCAmelCase = value.squeeze() _UpperCAmelCase = value diffusers_model.load_state_dict(_lowerCAmelCase ) _UpperCAmelCase = 100 _UpperCAmelCase = 33 _UpperCAmelCase = IPNDMScheduler(num_train_timesteps=_lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(_lowerCAmelCase ) _UpperCAmelCase = torch.randn([1, 2, config.sample_size] , generator=_lowerCAmelCase ).to(_lowerCAmelCase ) _UpperCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=_lowerCAmelCase )[:-1] _UpperCAmelCase = get_crash_schedule(_lowerCAmelCase ) _UpperCAmelCase = DanceDiffusionPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(33 ) _UpperCAmelCase = pipe(num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase ).audios _UpperCAmelCase = sampling.iplms_sample(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {} ) _UpperCAmelCase = generated.clamp(-1 , 1 ) _UpperCAmelCase = (generated - audio).abs().sum() _UpperCAmelCase = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("Diff sum" , _lowerCAmelCase ) print("Diff max" , _lowerCAmelCase ) assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/''' print(F'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") __lowerCAmelCase = parser.parse_args() main(args)
684
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : str = (UniPCMultistepScheduler,) __SCREAMING_SNAKE_CASE : Dict = (("""num_inference_steps""", 25),) def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Any ): _UpperCAmelCase = { "num_train_timesteps": 1_000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "solver_type": "bh2", } config.update(**__UpperCamelCase ) return config def UpperCAmelCase__ ( self : int , __UpperCamelCase : Any=0 , **__UpperCamelCase : Any ): _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase = sample, sample for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any]=0 , **__UpperCamelCase : List[Any] ): _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not 
identical" def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Dict=None , **__UpperCamelCase : Optional[Any] ): if scheduler is None: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 10 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase ) for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ): scheduler.set_timesteps(__UpperCamelCase ) elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ): _UpperCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] _UpperCAmelCase = scheduler.timesteps[5] _UpperCAmelCase = scheduler.timesteps[6] _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCAmelCase__ ( self : Union[str, Any] ): # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2464 ) < 1e-3 _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2464 ) < 1e-3 def UpperCAmelCase__ ( self : str ): for timesteps in [25, 50, 100, 999, 1_000]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def UpperCAmelCase__ ( self : int ): self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def 
UpperCAmelCase__ ( self : int ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def UpperCAmelCase__ ( self : int ): for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , ) _UpperCAmelCase = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers" def UpperCAmelCase__ ( self : Optional[int] ): self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2464 ) < 1e-3 def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = self.full_loop(prediction_type="v_prediction" ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.1014 ) < 1e-3 def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 10 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample assert sample.dtype == torch.floataa def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Optional[Any] ): for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
684
1
from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , lowercase , ) class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : Dict = RobertaConfig __SCREAMING_SNAKE_CASE : Dict = """roberta""" def __init__( self : Optional[Any] , __UpperCamelCase : int ): super().__init__(__UpperCamelCase ) _UpperCAmelCase = RobertaEmbeddings(__UpperCamelCase ) self.init_weights() @add_start_docstrings( """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top, also takes care of multi-layer training. """ , lowercase , ) class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : Tuple = RobertaConfig __SCREAMING_SNAKE_CASE : Tuple = """roberta""" def __init__( self : Dict , __UpperCamelCase : Any ): super().__init__(__UpperCamelCase ) _UpperCAmelCase = config.num_labels _UpperCAmelCase = config.num_hidden_layers _UpperCAmelCase = DeeRobertaModel(__UpperCamelCase ) _UpperCAmelCase = nn.Dropout(config.hidden_dropout_prob ) _UpperCAmelCase = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(__UpperCamelCase ) def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Dict=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Dict=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : str=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Optional[int]=-1 , __UpperCamelCase : List[Any]=False , ): _UpperCAmelCase = self.num_layers try: _UpperCAmelCase = self.roberta( __UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , position_ids=__UpperCamelCase , head_mask=__UpperCamelCase , inputs_embeds=__UpperCamelCase , ) _UpperCAmelCase = outputs[1] _UpperCAmelCase = self.dropout(__UpperCamelCase ) _UpperCAmelCase = self.classifier(__UpperCamelCase ) _UpperCAmelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: _UpperCAmelCase = e.message _UpperCAmelCase = e.exit_layer _UpperCAmelCase = outputs[0] if not self.training: _UpperCAmelCase = entropy(__UpperCamelCase ) _UpperCAmelCase = [] _UpperCAmelCase = [] if labels is not None: if self.num_labels == 1: # We are doing regression _UpperCAmelCase = MSELoss() _UpperCAmelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: _UpperCAmelCase = CrossEntropyLoss() _UpperCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits _UpperCAmelCase = [] for highway_exit in outputs[-1]: _UpperCAmelCase = highway_exit[0] if not self.training: highway_logits_all.append(__UpperCamelCase ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression _UpperCAmelCase = MSELoss() _UpperCAmelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: _UpperCAmelCase = CrossEntropyLoss() _UpperCAmelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(__UpperCamelCase ) 
if train_highway: _UpperCAmelCase = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: _UpperCAmelCase = (loss,) + outputs if not self.training: _UpperCAmelCase = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: _UpperCAmelCase = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
684
import math


class Graph:
    def __init__(self, n: int = 0):  # a graph with nodes 0, 1, ..., n - 1
        self.n = n
        # adjacency matrix for edge weights
        self.w = [[math.inf for _ in range(n)] for _ in range(n)]
        # dp[i][j] stores the minimum distance from i to j
        self.dp = [[math.inf for _ in range(n)] for _ in range(n)]
        for i in range(n):
            self.dp[i][i] = 0  # distance from a node to itself is zero

    def add_edge(self, u: int, v: int, w: int) -> None:
        self.dp[u][v] = w

    def floyd_warshall(self) -> None:
        for k in range(self.n):
            for i in range(self.n):
                for j in range(self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u: int, v: int) -> float:
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
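A minimal worked example of the relaxation dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j]) on a 3-node graph, a sketch assuming the cleaned-up Graph class above:

g = Graph(3)
g.add_edge(0, 1, 4)
g.add_edge(1, 2, 1)
g.add_edge(0, 2, 10)
g.floyd_warshall()
print(g.show_min(0, 2))  # 5: the path 0 -> 1 -> 2 beats the direct edge of weight 10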
684
1
def euclidean_distance_sqr(point1, point2) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(points, column: int = 0) -> list:
    return sorted(points, key=lambda point: point[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")) -> float:
    # brute force over all pairs
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")) -> float:
    # in a y-sorted strip, each point only needs to be compared with 6 neighbours
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts) -> float:
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion: split the x-sorted points in half
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x[:mid], points_sorted_on_y, mid
    )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x[mid:], points_sorted_on_y, points_counts - mid
    )
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # points within closest_pair_dis of the dividing vertical line, in y order
    cross_strip = []
    for point in points_sorted_on_y:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis
    )
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts) -> float:
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5


if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
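A brute-force cross-check of the divide-and-conquer result, a sketch assuming the cleaned-up names above:

from itertools import combinations

def brute_force_closest(points):
    return min(euclidean_distance_sqr(p, q) for p, q in combinations(points, 2)) ** 0.5

pts = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
# the closest pair here is (2, 3) and (3, 4), distance sqrt(2)
assert abs(brute_force_closest(pts) - closest_pair_of_points(pts, len(pts))) < 1e-9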
684
import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : Dict = VQModel __SCREAMING_SNAKE_CASE : Optional[int] = """sample""" @property def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[int]=(32, 32) ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase ) return {"sample": image} @property def UpperCAmelCase__ ( self : Tuple ): return (3, 32, 32) @property def UpperCAmelCase__ ( self : str ): return (3, 32, 32) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase__ ( self : Dict ): pass def UpperCAmelCase__ ( self : str ): pass def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(__UpperCamelCase ) _UpperCAmelCase = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" ) model.to(__UpperCamelCase ).eval() torch.manual_seed(0 ) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0 ) _UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size ) _UpperCAmelCase = image.to(__UpperCamelCase ) with torch.no_grad(): _UpperCAmelCase = model(__UpperCamelCase ).sample _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] ) # fmt: on self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
684
1
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file in src_dir to dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
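# Hypothetical invocation sketch (fire exposes `minify` as a CLI; the paths and the
# line count below are examples, not values from the original script):
#   python minify.py --src_dir ./raw_data --dest_dir ./mini_data --n 100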
684
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
684
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json", "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json", } class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : Union[str, Any] = """luke""" def __init__( self : Optional[Any] , __UpperCamelCase : Dict=50_267 , __UpperCamelCase : List[Any]=500_000 , __UpperCamelCase : Union[str, Any]=768 , __UpperCamelCase : Optional[int]=256 , __UpperCamelCase : List[str]=12 , __UpperCamelCase : Tuple=12 , __UpperCamelCase : Union[str, Any]=3_072 , __UpperCamelCase : List[str]="gelu" , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Optional[Any]=512 , __UpperCamelCase : Dict=2 , __UpperCamelCase : List[str]=0.02 , __UpperCamelCase : List[Any]=1e-1_2 , __UpperCamelCase : Tuple=True , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Dict=1 , __UpperCamelCase : Union[str, Any]=0 , __UpperCamelCase : List[str]=2 , **__UpperCamelCase : str , ): super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = vocab_size _UpperCAmelCase = entity_vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = entity_emb_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = use_entity_aware_attention _UpperCAmelCase = classifier_dropout
684
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase = 10 def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = [1, 2, 3, 4] _UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase ) def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase ) def UpperCAmelCase__ ( self : int ): _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this." _UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase ) self.assertEqual(__UpperCamelCase , [] ) def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = "" _UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase ) self.assertEqual(__UpperCamelCase , [] ) self.assertEqual(__UpperCamelCase , [] ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = ( "It was the year of Our Lord one thousand seven hundred and " "seventy-five\n\nSpiritual revelations were conceded to England " "at that favoured period, as at this.\n@highlight\n\nIt was the best of times" ) _UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase ) _UpperCAmelCase = [ "It was the year of Our Lord one thousand seven hundred and seventy-five.", "Spiritual revelations were conceded to England at that favoured period, as at this.", ] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = ["It was the best of times."] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = torch.tensor([1, 2, 3, 4] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(__UpperCamelCase , 0 ).numpy() , expected.numpy() ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCamelCase , 23 ).numpy() , expected.numpy() ) def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCamelCase , 1 ).numpy() , expected.numpy() ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = 101 _UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) _UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) _UpperCAmelCase = compute_token_type_ids(__UpperCamelCase , __UpperCamelCase ) np.testing.assert_array_equal(__UpperCamelCase , __UpperCamelCase )
684
1
from __future__ import annotations

from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    # Maximum sum over all non-empty (not necessarily contiguous) subsequences.
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # either keep the running best, extend it with num, or restart at num
        ans = max(ans, ans + num, num)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
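# Illustrative checks (sample arrays are my own): because any subset of elements may
# be chosen, the first call simply accumulates the positive entries (1 + 2 + 3 + 4 = 10),
# while an all-negative input returns its largest single element.
assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10
assert max_subsequence_sum([-3, -1, -2]) == -1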
684
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    # Compute whichever of voltage, current or power is passed as 0 from the other two.
    # (The function name is a descriptive choice; the original name was mangled.)
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
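# Illustrative usage (values are arbitrary): exactly one of the three quantities is
# passed as 0 and is computed from the other two.
assert electric_power(voltage=0, current=2, power=5) == ("voltage", 2.5)
assert electric_power(voltage=2.2, current=2.2, power=0) == ("power", 4.84)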
684
1
from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch __lowerCAmelCase = logging.get_logger(__name__) @add_end_docstrings( lowercase , R""" top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). """ , ) class __SCREAMING_SNAKE_CASE ( lowercase): def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : GenericTensor ): if self.framework == "tf": _UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": _UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__UpperCamelCase ) else: raise ValueError("Unsupported framework" ) return masked_index def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : GenericTensor ): _UpperCAmelCase = self.get_masked_index(__UpperCamelCase ) _UpperCAmelCase = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( "fill-mask" , self.model.base_model_prefix , F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , ) def UpperCAmelCase__ ( self : str , __UpperCamelCase : GenericTensor ): if isinstance(__UpperCamelCase , __UpperCamelCase ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input["input_ids"][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(__UpperCamelCase ) def UpperCAmelCase__ ( self : int , __UpperCamelCase : str , __UpperCamelCase : Dict=None , **__UpperCamelCase : Optional[Any] ): if return_tensors is None: _UpperCAmelCase = self.framework _UpperCAmelCase = self.tokenizer(__UpperCamelCase , return_tensors=__UpperCamelCase ) self.ensure_exactly_one_mask_token(__UpperCamelCase ) return model_inputs def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Any ): _UpperCAmelCase = self.model(**__UpperCamelCase ) _UpperCAmelCase = model_inputs["input_ids"] return model_outputs def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any]=5 , __UpperCamelCase : Union[str, Any]=None ): # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: _UpperCAmelCase = target_ids.shape[0] _UpperCAmelCase = model_outputs["input_ids"][0] _UpperCAmelCase = model_outputs["logits"] if self.framework == "tf": _UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] _UpperCAmelCase = outputs.numpy() _UpperCAmelCase = outputs[0, masked_index, :] _UpperCAmelCase = stable_softmax(__UpperCamelCase , axis=-1 ) if target_ids is not None: _UpperCAmelCase = tf.gather_nd(tf.squeeze(__UpperCamelCase , 0 ) , target_ids.reshape(-1 , 1 ) ) _UpperCAmelCase = tf.expand_dims(__UpperCamelCase , 0 ) _UpperCAmelCase = tf.math.top_k(__UpperCamelCase , k=__UpperCamelCase ) _UpperCAmelCase , _UpperCAmelCase = topk.values.numpy(), topk.indices.numpy() else: _UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__UpperCamelCase ).squeeze(-1 ) # Fill mask pipeline supports only one 
${mask_token} per sample _UpperCAmelCase = outputs[0, masked_index, :] _UpperCAmelCase = logits.softmax(dim=-1 ) if target_ids is not None: _UpperCAmelCase = probs[..., target_ids] _UpperCAmelCase , _UpperCAmelCase = probs.topk(__UpperCamelCase ) _UpperCAmelCase = [] _UpperCAmelCase = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): _UpperCAmelCase = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place _UpperCAmelCase = input_ids.numpy().copy() if target_ids is not None: _UpperCAmelCase = target_ids[p].tolist() _UpperCAmelCase = p # Filter padding out: _UpperCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back _UpperCAmelCase = self.tokenizer.decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase ) _UpperCAmelCase = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence} row.append(__UpperCamelCase ) result.append(__UpperCamelCase ) if single_mask: return result[0] return result def UpperCAmelCase__ ( self : Any , __UpperCamelCase : str , __UpperCamelCase : Tuple=None ): if isinstance(__UpperCamelCase , __UpperCamelCase ): _UpperCAmelCase = [targets] try: _UpperCAmelCase = self.tokenizer.get_vocab() except Exception: _UpperCAmelCase = {} _UpperCAmelCase = [] for target in targets: _UpperCAmelCase = vocab.get(__UpperCamelCase , __UpperCamelCase ) if id_ is None: _UpperCAmelCase = self.tokenizer( __UpperCamelCase , add_special_tokens=__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , max_length=1 , truncation=__UpperCamelCase , )["input_ids"] if len(__UpperCamelCase ) == 0: logger.warning( F'''The specified target token `{target}` does not exist in the model vocabulary. ''' "We cannot replace it with anything meaningful, ignoring it" ) continue _UpperCAmelCase = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F'''The specified target token `{target}` does not exist in the model vocabulary. ''' F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' ) target_ids.append(id_ ) _UpperCAmelCase = list(set(__UpperCamelCase ) ) if len(__UpperCamelCase ) == 0: raise ValueError("At least one target must be provided when passed." ) _UpperCAmelCase = np.array(__UpperCamelCase ) return target_ids def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : int=None , __UpperCamelCase : Optional[Any]=None ): _UpperCAmelCase = {} if targets is not None: _UpperCAmelCase = self.get_target_ids(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = target_ids if top_k is not None: _UpperCAmelCase = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( "fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." ) return {}, {}, postprocess_params def __call__( self : Any , __UpperCamelCase : int , *__UpperCamelCase : Dict , **__UpperCamelCase : str ): _UpperCAmelCase = super().__call__(__UpperCamelCase , **__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) == 1: return outputs[0] return outputs
684
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    # Extract start time, end time and duration (in minutes) from one workflow job.
    # (The "started_at"/"completed_at" result keys are inferred; only "duration" is
    # pinned down by the printing code at the bottom of this file.)
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    # Collect timing info for every job of a GitHub Actions workflow run.
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
684
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCAmelCase = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = ["MBartTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = ["MBartTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ "MBART_PRETRAINED_MODEL_ARCHIVE_LIST", "MBartForCausalLM", "MBartForConditionalGeneration", "MBartForQuestionAnswering", "MBartForSequenceClassification", "MBartModel", "MBartPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ "TFMBartForConditionalGeneration", "TFMBartModel", "TFMBartPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ "FlaxMBartForConditionalGeneration", "FlaxMBartForQuestionAnswering", "FlaxMBartForSequenceClassification", "FlaxMBartModel", "FlaxMBartPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
684
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel __lowerCAmelCase = { "gwf-440k": { "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 6_5_5_3_6, }, "jmann-small-190k": { "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 6_5_5_3_6, }, "jmann-large-580k": { "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 1_3_1_0_7_2, }, "maestro-uncond-150k": { "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, "unlocked-uncond-250k": { "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, "honk-140k": { "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, } def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: return torch.atana(_lowerCAmelCase , _lowerCAmelCase ) / math.pi * 2 def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]: _UpperCAmelCase = torch.sin(t * math.pi / 2 ) ** 2 _UpperCAmelCase = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(_lowerCAmelCase , _lowerCAmelCase ) class __SCREAMING_SNAKE_CASE ( lowercase): pass class __SCREAMING_SNAKE_CASE ( nn.Module): def __init__( self : str , __UpperCamelCase : Optional[int] ): super().__init__() _UpperCAmelCase = DiffusionAttnUnetaD(__UpperCamelCase , n_attn_layers=4 ) _UpperCAmelCase = deepcopy(self.diffusion ) _UpperCAmelCase = torch.quasirandom.SobolEngine(1 , scramble=__UpperCamelCase ) def __lowerCamelCase ( _lowerCAmelCase ) -> int: _UpperCAmelCase = MODELS_MAP[model_name]["url"] os.system(F'''wget {url} ./''' ) return F'''./{model_name}.ckpt''' __lowerCAmelCase = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", } __lowerCAmelCase = { "8": "resnets.0", "9": "attentions.0", "10": "resnets.1", "11": "attentions.1", "12": "resnets.2", "13": "attentions.2", } __lowerCAmelCase = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", "8": "resnets.3", "9": "attentions.3", "10": "resnets.4", "11": "attentions.4", "12": "resnets.5", "13": "attentions.5", } __lowerCAmelCase = { "0": "resnets.0", "1": "resnets.1", "2": "resnets.2", "4": "resnets.0", "5": "resnets.1", "6": "resnets.2", } __lowerCAmelCase = { "skip": "conv_skip", "main.0": "conv_1", "main.1": "group_norm_1", "main.3": "conv_2", "main.4": "group_norm_2", } __lowerCAmelCase = { "norm": "group_norm", "qkv_proj": ["query", "key", "value"], "out_proj": ["proj_attn"], } def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]: if name.startswith("skip" ): return name.replace("skip" , RES_CONV_MAP["skip"] ) # name has to be of format main.{digit} if not name.startswith("main." 
): raise ValueError(F'''ResConvBlock error with {name}''' ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[Any]: for key, value in ATTN_MAP.items(): if name.startswith(_lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ): return name.replace(_lowerCAmelCase , _lowerCAmelCase ) elif name.startswith(_lowerCAmelCase ): return [name.replace(_lowerCAmelCase , _lowerCAmelCase ) for v in value] raise ValueError(F'''Attn error with {name}''' ) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=13 ) -> List[Any]: _UpperCAmelCase = input_string if string.split("." )[0] == "timestep_embed": return string.replace("timestep_embed" , "time_proj" ) _UpperCAmelCase = 0 if string.startswith("net.3." ): depth += 1 _UpperCAmelCase = string[6:] elif string.startswith("net." ): _UpperCAmelCase = string[4:] while string.startswith("main.7." ): depth += 1 _UpperCAmelCase = string[7:] if string.startswith("main." ): _UpperCAmelCase = string[5:] # mid block if string[:2].isdigit(): _UpperCAmelCase = string[:2] _UpperCAmelCase = string[2:] else: _UpperCAmelCase = string[0] _UpperCAmelCase = string[1:] if depth == max_depth: _UpperCAmelCase = MID_NUM_TO_LAYER[layer_num] _UpperCAmelCase = "mid_block" elif depth > 0 and int(_lowerCAmelCase ) < 7: _UpperCAmelCase = DOWN_NUM_TO_LAYER[layer_num] _UpperCAmelCase = F'''down_blocks.{depth}''' elif depth > 0 and int(_lowerCAmelCase ) > 7: _UpperCAmelCase = UP_NUM_TO_LAYER[layer_num] _UpperCAmelCase = F'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: _UpperCAmelCase = DEPTH_0_TO_LAYER[layer_num] _UpperCAmelCase = F'''up_blocks.{max_depth - 1}''' if int(_lowerCAmelCase ) > 3 else "down_blocks.0" if not string_left.startswith("." ): raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' ) _UpperCAmelCase = string_left[1:] if "resnets" in new_layer: _UpperCAmelCase = convert_resconv_naming(_lowerCAmelCase ) elif "attentions" in new_layer: _UpperCAmelCase = convert_attn_naming(_lowerCAmelCase ) _UpperCAmelCase = new_string_left if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): _UpperCAmelCase = prefix + "." + new_layer + "." + string_left else: _UpperCAmelCase = [prefix + "." + new_layer + "." + s for s in string_left] return new_string def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]: _UpperCAmelCase = {} for k, v in state_dict.items(): if k.endswith("kernel" ): # up- and downsample layers, don't have trainable weights continue _UpperCAmelCase = rename(_lowerCAmelCase ) # check if we need to transform from Conv => Linear for attention if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _UpperCAmelCase = transform_conv_attns(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: _UpperCAmelCase = v return new_state_dict def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: if len(_lowerCAmelCase ) == 1: if len(v.shape ) == 3: # weight _UpperCAmelCase = v[:, :, 0] else: # bias _UpperCAmelCase = v else: # qkv matrices _UpperCAmelCase = v.shape[0] _UpperCAmelCase = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: _UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0] else: _UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple: _UpperCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) _UpperCAmelCase = args.model_path.split("/" )[-1].split("." 
)[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' _UpperCAmelCase = download(_lowerCAmelCase ) _UpperCAmelCase = MODELS_MAP[model_name]["sample_rate"] _UpperCAmelCase = MODELS_MAP[model_name]["sample_size"] _UpperCAmelCase = Object() _UpperCAmelCase = sample_size _UpperCAmelCase = sample_rate _UpperCAmelCase = 0 _UpperCAmelCase = UNetaDModel(sample_size=_lowerCAmelCase , sample_rate=_lowerCAmelCase ) _UpperCAmelCase = diffusers_model.state_dict() _UpperCAmelCase = DiffusionUncond(_lowerCAmelCase ) orig_model.load_state_dict(torch.load(args.model_path , map_location=_lowerCAmelCase )["state_dict"] ) _UpperCAmelCase = orig_model.diffusion_ema.eval() _UpperCAmelCase = orig_model.state_dict() _UpperCAmelCase = rename_orig_weights(_lowerCAmelCase ) _UpperCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) _UpperCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(_lowerCAmelCase ) == 0, F'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("kernel" ) for k in list(_lowerCAmelCase ) ), F'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}''' if key == "time_proj.weight": _UpperCAmelCase = value.squeeze() _UpperCAmelCase = value diffusers_model.load_state_dict(_lowerCAmelCase ) _UpperCAmelCase = 100 _UpperCAmelCase = 33 _UpperCAmelCase = IPNDMScheduler(num_train_timesteps=_lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(_lowerCAmelCase ) _UpperCAmelCase = torch.randn([1, 2, config.sample_size] , generator=_lowerCAmelCase ).to(_lowerCAmelCase ) _UpperCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=_lowerCAmelCase )[:-1] _UpperCAmelCase = get_crash_schedule(_lowerCAmelCase ) _UpperCAmelCase = DanceDiffusionPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(33 ) _UpperCAmelCase = pipe(num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase ).audios _UpperCAmelCase = sampling.iplms_sample(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {} ) _UpperCAmelCase = generated.clamp(-1 , 1 ) _UpperCAmelCase = (generated - audio).abs().sum() _UpperCAmelCase = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("Diff sum" , _lowerCAmelCase ) print("Diff max" , _lowerCAmelCase ) assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/''' print(F'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") __lowerCAmelCase = parser.parse_args() main(args)
684
1
from __future__ import annotations


def peak(lst: list[int]) -> int:
    # Binary-search-style recursion: O(log n) for a list that rises and then falls.
    m = len(lst) // 2

    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]

    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]

    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])

    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
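# Illustrative check (the sample list is my own; it must increase and then decrease):
assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5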
684
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 __lowerCAmelCase = get_tests_dir("fixtures") class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCAmelCase__ ( self : Dict ): # A mock response for an HTTP head request to emulate server down _UpperCAmelCase = mock.Mock() _UpperCAmelCase = 500 _UpperCAmelCase = {} _UpperCAmelCase = HTTPError _UpperCAmelCase = {} # Download this model to make sure it's in the cache. _UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head: _UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" ) # This check we did call the fake head request mock_head.assert_called() def UpperCAmelCase__ ( self : List[Any] ): # This test is for deprecated behavior and can be removed in v5 _UpperCAmelCase = ViTImageProcessor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" ) def UpperCAmelCase__ ( self : Dict ): with self.assertRaises(__UpperCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder _UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" ) _UpperCAmelCase = AutoImageProcessor.from_pretrained( "hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" ) self.assertIsNotNone(__UpperCamelCase ) @is_staging_test class __SCREAMING_SNAKE_CASE ( unittest.TestCase): @classmethod def UpperCAmelCase__ ( cls : str ): _UpperCAmelCase = TOKEN HfFolder.save_token(__UpperCamelCase ) @classmethod def UpperCAmelCase__ ( cls : Optional[Any] ): try: delete_repo(token=cls._token , repo_id="test-image-processor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" ) except HTTPError: pass def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase ) image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="test-image-processor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __UpperCamelCase , repo_id="test-image-processor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = 
ViTImageProcessor.from_pretrained(__UpperCamelCase ) image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-image-processor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) def UpperCAmelCase__ ( self : int ): CustomImageProcessor.register_for_auto_class() _UpperCAmelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase ) image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , ) _UpperCAmelCase = AutoImageProcessor.from_pretrained( F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__UpperCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
684
1
from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
684
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    # (The test names are descriptive choices; the originals were mangled.)
    my = HashMap(initial_block_size=4)
    py = {}
    for fun, *args in operations:
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
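# Design note on the test above: Python's built-in dict serves as a behavioural oracle.
# Every operation is replayed on both the HashMap under test and a plain dict, and
# their results, string forms, key sets, lengths and item sets must stay identical.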
684
1
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    # ELU activation: identity for positive inputs, alpha * (exp(x) - 1) otherwise.
    # (The function name is a descriptive choice; the original name was mangled.)
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
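# Illustrative usage (sample values are my own): positive inputs pass through
# unchanged, negative inputs are squashed towards -alpha.
print(exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), alpha=0.3))
# prints approximately: [ 2.3  0.6  -0.2594  -0.2933 ]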
684
def binary_insertion_sort(collection: list) -> list:
    # Insertion sort that locates each insert position with binary search.
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift everything in [low, i) one step right, then drop val into place
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
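# Illustrative check (the sample list is my own):
assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]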
684
1
import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : Union[str, Any] = OpenAIGPTTokenizer __SCREAMING_SNAKE_CASE : List[str] = OpenAIGPTTokenizerFast __SCREAMING_SNAKE_CASE : Optional[int] = True __SCREAMING_SNAKE_CASE : Optional[Any] = False def UpperCAmelCase__ ( self : List[str] ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCAmelCase = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] _UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) _UpperCAmelCase = ["#version: 0.2", "l o", "lo w", "e r</w>", ""] _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(__UpperCamelCase ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(__UpperCamelCase ) ) def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Any ): return "lower newer", "lower newer" def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = OpenAIGPTTokenizer(self.vocab_file , self.merges_file ) _UpperCAmelCase = "lower" _UpperCAmelCase = ["low", "er</w>"] _UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = tokens + ["<unk>"] _UpperCAmelCase = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : str=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) # Simple input _UpperCAmelCase = "This is a simple input" _UpperCAmelCase = ["This is a simple input 1", "This is a simple input 2"] _UpperCAmelCase = ("This is a simple input", "This is a pair") _UpperCAmelCase = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(__UpperCamelCase , tokenizer_r.encode , __UpperCamelCase , max_length=__UpperCamelCase , padding="max_length" ) # Simple input self.assertRaises(__UpperCamelCase , tokenizer_r.encode_plus , __UpperCamelCase , max_length=__UpperCamelCase , padding="max_length" ) # Simple input self.assertRaises( __UpperCamelCase , tokenizer_r.batch_encode_plus , __UpperCamelCase , max_length=__UpperCamelCase , padding="max_length" , ) # Pair input self.assertRaises(__UpperCamelCase , tokenizer_r.encode , __UpperCamelCase , max_length=__UpperCamelCase , padding="max_length" ) # Pair input self.assertRaises(__UpperCamelCase , tokenizer_r.encode_plus , __UpperCamelCase , max_length=__UpperCamelCase , padding="max_length" ) # Pair input self.assertRaises( __UpperCamelCase , tokenizer_r.batch_encode_plus , __UpperCamelCase , max_length=__UpperCamelCase , padding="max_length" , ) def 
UpperCAmelCase__ ( self : Any ): pass @require_ftfy @require_spacy @require_tokenizers class __SCREAMING_SNAKE_CASE ( lowercase): pass
684
# Number of possible characters, used as the base of the rolling hash
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    # Return True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes.
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)

    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
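# Design note: each character acts as a base-256 digit and hashes are reduced modulo
# the prime 1_000_003, which keeps them in machine-word range; the explicit
# `text[i : i + p_len] == pattern` comparison guards against the rare hash collision.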
684
1
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    # Digit sum of the numerator of the max_n-th convergent of e. The continued
    # fraction of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], so the i-th coefficient
    # is 2 * i / 3 when i is divisible by 3 and 1 otherwise.
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
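# Illustrative check: the 10th convergent of e is 1457/536, and 1 + 4 + 5 + 7 = 17.
assert solution(10) == 17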
684
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch __lowerCAmelCase = random.Random() def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]: if rng is None: _UpperCAmelCase = global_rng _UpperCAmelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def __init__( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Union[str, Any]=400 , __UpperCamelCase : List[Any]=2_000 , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : Optional[int]=160 , __UpperCamelCase : Any=8 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Dict=4_000 , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Tuple=True , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = min_seq_length _UpperCAmelCase = max_seq_length _UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _UpperCAmelCase = padding_value _UpperCAmelCase = sampling_rate _UpperCAmelCase = return_attention_mask _UpperCAmelCase = do_normalize _UpperCAmelCase = feature_size _UpperCAmelCase = chunk_length _UpperCAmelCase = hop_length def UpperCAmelCase__ ( self : Optional[Any] ): return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Tuple=False , __UpperCamelCase : Dict=False ): def _flatten(__UpperCamelCase : Any ): return list(itertools.chain(*__UpperCamelCase ) ) if equal_length: _UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _UpperCAmelCase = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : str = WhisperFeatureExtractor if is_speech_available() else None def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = WhisperFeatureExtractionTester(self ) def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = feat_extract_first.save_pretrained(__UpperCamelCase )[0] check_json_file_has_correct_format(__UpperCamelCase ) _UpperCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCamelCase ) _UpperCAmelCase = feat_extract_first.to_dict() _UpperCAmelCase = feat_extract_second.to_dict() _UpperCAmelCase = 
feat_extract_first.mel_filters _UpperCAmelCase = feat_extract_second.mel_filters self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(__UpperCamelCase , "feat_extract.json" ) feat_extract_first.to_json_file(__UpperCamelCase ) _UpperCAmelCase = self.feature_extraction_class.from_json_file(__UpperCamelCase ) _UpperCAmelCase = feat_extract_first.to_dict() _UpperCAmelCase = feat_extract_second.to_dict() _UpperCAmelCase = feat_extract_first.mel_filters _UpperCAmelCase = feat_extract_second.mel_filters self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : int ): # Tests that all call wrap to encode_plus and batch_encode_plus _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs] # Test feature size _UpperCAmelCase = feature_extractor(__UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input _UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) # Test batched _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)] _UpperCAmelCase = np.asarray(__UpperCamelCase ) _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) # Test truncation required _UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs] _UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs] _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs_truncated] _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) def UpperCAmelCase__ ( self : Union[str, Any] ): import torch _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa ) _UpperCAmelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) _UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Tuple ): _UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech _UpperCAmelCase = ds.sort("id" ).select(range(__UpperCamelCase ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def UpperCAmelCase__ ( self : Tuple ): # fmt: off _UpperCAmelCase = torch.tensor( [ 0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951, 0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678, 0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554, -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854 ] ) # fmt: on _UpperCAmelCase = self._load_datasamples(1 ) _UpperCAmelCase = WhisperFeatureExtractor() _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_features self.assertEqual(input_features.shape , (1, 80, 3_000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , __UpperCamelCase , atol=1e-4 ) ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase = self._load_datasamples(1 )[0] _UpperCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue _UpperCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__UpperCamelCase )[0] self.assertTrue(np.all(np.mean(__UpperCamelCase ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase ) - 1 ) < 1e-3 ) )
import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse("3.8"): import importlib_metadata else: import importlib.metadata as importlib_metadata def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=False ) -> Any: try: _UpperCAmelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. _UpperCAmelCase = default else: # KEY is set, convert it to True or False. try: _UpperCAmelCase = strtobool(_lowerCAmelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'''If set, {key} must be yes or no.''' ) return _value __lowerCAmelCase = parse_flag_from_env("RUN_SLOW", default=False) __lowerCAmelCase = parse_flag_from_env("RUN_REMOTE", default=False) __lowerCAmelCase = parse_flag_from_env("RUN_LOCAL", default=True) __lowerCAmelCase = parse_flag_from_env("RUN_PACKAGED", default=True) # Compression __lowerCAmelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4") __lowerCAmelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr") __lowerCAmelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard") # Audio __lowerCAmelCase = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"), reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ", ) # Beam __lowerCAmelCase = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"), reason="test requires apache-beam and a compatible dill version", ) # Dill-cloudpickle compatibility __lowerCAmelCase = pytest.mark.skipif( config.DILL_VERSION <= version.parse("0.3.2"), reason="test requires dill>0.3.2 for cloudpickle compatibility", ) # Windows __lowerCAmelCase = pytest.mark.skipif( sys.platform == "win32", reason="test should not be run on Windows", ) def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple: try: import faiss # noqa except ImportError: _UpperCAmelCase = unittest.skip("test requires faiss" )(_lowerCAmelCase ) return test_case def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]: try: import regex # noqa except ImportError: _UpperCAmelCase = unittest.skip("test requires regex" )(_lowerCAmelCase ) return test_case def __lowerCamelCase ( _lowerCAmelCase ) -> str: try: import elasticsearch # noqa except ImportError: _UpperCAmelCase = unittest.skip("test requires elasticsearch" )(_lowerCAmelCase ) return test_case def __lowerCamelCase ( _lowerCAmelCase ) -> Dict: try: import sqlalchemy # noqa except ImportError: _UpperCAmelCase = unittest.skip("test requires sqlalchemy" )(_lowerCAmelCase ) return test_case def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]: if not config.TORCH_AVAILABLE: _UpperCAmelCase = unittest.skip("test requires PyTorch" )(_lowerCAmelCase ) return test_case def __lowerCamelCase ( _lowerCAmelCase ) -> int: if not config.TF_AVAILABLE: _UpperCAmelCase = unittest.skip("test requires TensorFlow" )(_lowerCAmelCase ) return test_case def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]: if not 
config.JAX_AVAILABLE: _UpperCAmelCase = unittest.skip("test requires JAX" )(_lowerCAmelCase ) return test_case def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]: if not config.PIL_AVAILABLE: _UpperCAmelCase = unittest.skip("test requires Pillow" )(_lowerCAmelCase ) return test_case def __lowerCamelCase ( _lowerCAmelCase ) -> int: try: import transformers # noqa F401 except ImportError: return unittest.skip("test requires transformers" )(_lowerCAmelCase ) else: return test_case def __lowerCamelCase ( _lowerCAmelCase ) -> Any: try: import tiktoken # noqa F401 except ImportError: return unittest.skip("test requires tiktoken" )(_lowerCAmelCase ) else: return test_case def __lowerCamelCase ( _lowerCAmelCase ) -> int: try: import spacy # noqa F401 except ImportError: return unittest.skip("test requires spacy" )(_lowerCAmelCase ) else: return test_case def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple: def _require_spacy_model(_lowerCAmelCase ): try: import spacy # noqa F401 spacy.load(_lowerCAmelCase ) except ImportError: return unittest.skip("test requires spacy" )(_lowerCAmelCase ) except OSError: return unittest.skip("test requires spacy model '{}'".format(_lowerCAmelCase ) )(_lowerCAmelCase ) else: return test_case return _require_spacy_model def __lowerCamelCase ( _lowerCAmelCase ) -> str: try: import pyspark # noqa F401 except ImportError: return unittest.skip("test requires pyspark" )(_lowerCAmelCase ) else: return test_case def __lowerCamelCase ( _lowerCAmelCase ) -> Any: try: import joblibspark # noqa F401 except ImportError: return unittest.skip("test requires joblibspark" )(_lowerCAmelCase ) else: return test_case def __lowerCamelCase ( _lowerCAmelCase ) -> int: if not _run_slow_tests or _run_slow_tests == 0: _UpperCAmelCase = unittest.skip("test is slow" )(_lowerCAmelCase ) return test_case def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]: if not _run_local_tests or _run_local_tests == 0: _UpperCAmelCase = unittest.skip("test is local" )(_lowerCAmelCase ) return test_case def __lowerCamelCase ( _lowerCAmelCase ) -> List[Any]: if not _run_packaged_tests or _run_packaged_tests == 0: _UpperCAmelCase = unittest.skip("test is packaged" )(_lowerCAmelCase ) return test_case def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]: if not _run_remote_tests or _run_remote_tests == 0: _UpperCAmelCase = unittest.skip("test requires remote" )(_lowerCAmelCase ) return test_case def __lowerCamelCase ( *_lowerCAmelCase ) -> Tuple: def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(_lowerCAmelCase ) and name.startswith("test" ): for decorator in decorators: _UpperCAmelCase = decorator(_lowerCAmelCase ) setattr(cls , _lowerCAmelCase , _lowerCAmelCase ) return cls return decorate class __SCREAMING_SNAKE_CASE ( lowercase): pass class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : List[str] = 0 __SCREAMING_SNAKE_CASE : Union[str, Any] = 1 __SCREAMING_SNAKE_CASE : List[Any] = 2 @contextmanager def __lowerCamelCase ( _lowerCAmelCase=OfflineSimulationMode.CONNECTION_FAILS , _lowerCAmelCase=1E-16 ) -> List[Any]: _UpperCAmelCase = requests.Session().request def timeout_request(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ): # Change the url to an invalid url so that the connection hangs _UpperCAmelCase = "https://10.255.255.1" if kwargs.get("timeout" ) is None: raise RequestWouldHangIndefinitelyError( F'''Tried a call to {url} in offline mode with no timeout set. 
Please set a timeout.''' ) _UpperCAmelCase = timeout try: return online_request(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier _UpperCAmelCase = url _UpperCAmelCase = e.args[0] _UpperCAmelCase = (max_retry_error.args[0].replace("10.255.255.1" , F'''OfflineMock[{url}]''' ),) _UpperCAmelCase = (max_retry_error,) raise def raise_connection_error(_lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ): raise requests.ConnectionError("Offline mode is enabled." , request=_lowerCAmelCase ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("requests.Session.send" , _lowerCAmelCase ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("requests.Session.request" , _lowerCAmelCase ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCAmelCase ): yield else: raise ValueError("Please use a value from the OfflineSimulationMode enum." ) @contextmanager def __lowerCamelCase ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: _UpperCAmelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*_lowerCAmelCase , **_lowerCAmelCase ) as tmp_dir: try: os.chdir(_lowerCAmelCase ) yield finally: os.chdir(_lowerCAmelCase ) @contextmanager def __lowerCamelCase ( ) -> str: import gc gc.collect() _UpperCAmelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def __lowerCamelCase ( ) -> Optional[Any]: import gc gc.collect() _UpperCAmelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
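# The core of the CONNECTION_FAILS simulation above, in isolation (illustrative
# helper name; the file wires the same idea through its OfflineSimulationMode
# enum): patch the session's send so any HTTP attempt raises immediately, with
# no network I/O.
from unittest.mock import patch

import requests

def _raise_connection_error(self, request, **kwargs):
    raise requests.ConnectionError("Offline mode is enabled.", request=request)

with patch("requests.Session.send", _raise_connection_error):
    try:
        requests.get("https://huggingface.co", timeout=1)
    except requests.ConnectionError as err:
        print("offline simulation works:", err)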
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: return deepcopy(_lowerCAmelCase ).integers(0 , 100 , 10 ).tolist() == deepcopy(_lowerCAmelCase ).integers(0 , 100 , 10 ).tolist() def __lowerCamelCase ( _lowerCAmelCase ) -> str: import decorator from requests.exceptions import HTTPError def _wrapper(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ): try: return func(*_lowerCAmelCase , **_lowerCAmelCase ) except HTTPError as err: if str(_lowerCAmelCase ).startswith("500" ) or str(_lowerCAmelCase ).startswith("502" ): pytest.xfail(str(_lowerCAmelCase ) ) raise err return decorator.decorator(_wrapper , _lowerCAmelCase ) class __SCREAMING_SNAKE_CASE : def __init__( self : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : List[Any] ): _UpperCAmelCase = returncode _UpperCAmelCase = stdout _UpperCAmelCase = stderr async def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: while True: _UpperCAmelCase = await stream.readline() if line: callback(_lowerCAmelCase ) else: break async def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False ) -> _RunOutput: if echo: print("\nRunning: " , " ".join(_lowerCAmelCase ) ) _UpperCAmelCase = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_lowerCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCAmelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) _UpperCAmelCase = [] _UpperCAmelCase = [] def tee(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="" ): _UpperCAmelCase = line.decode("utf-8" ).rstrip() sink.append(_lowerCAmelCase ) if not quiet: print(_lowerCAmelCase , _lowerCAmelCase , file=_lowerCAmelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda _lowerCAmelCase : tee(_lowerCAmelCase , _lowerCAmelCase , sys.stdout , label="stdout:" ) ), _read_stream(p.stderr , lambda _lowerCAmelCase : tee(_lowerCAmelCase , _lowerCAmelCase , sys.stderr , label="stderr:" ) ), ] , timeout=_lowerCAmelCase , ) return _RunOutput(await p.wait() , _lowerCAmelCase , _lowerCAmelCase ) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=180 , _lowerCAmelCase=False , _lowerCAmelCase=True ) -> _RunOutput: _UpperCAmelCase = asyncio.get_event_loop() _UpperCAmelCase = loop.run_until_complete( _stream_subprocess(_lowerCAmelCase , env=_lowerCAmelCase , stdin=_lowerCAmelCase , timeout=_lowerCAmelCase , quiet=_lowerCAmelCase , echo=_lowerCAmelCase ) ) _UpperCAmelCase = " ".join(_lowerCAmelCase ) if result.returncode > 0: _UpperCAmelCase = "\n".join(result.stderr ) raise RuntimeError( F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' F'''The combined stderr from workers follows:\n{stderr}''' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F'''\'{cmd_str}\' produced no output.''' ) return result def __lowerCamelCase ( ) -> Union[str, Any]: _UpperCAmelCase = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" ) _UpperCAmelCase = re.sub(r"^gw" , "" , _lowerCAmelCase , 0 , re.M ) return int(_lowerCAmelCase ) def __lowerCamelCase ( ) -> Optional[Any]: _UpperCAmelCase = 29_500 _UpperCAmelCase = pytest_xdist_worker_id() return port + uniq_delta
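# The xdist port trick above, as a standalone sketch (illustrative name): every
# pytest-xdist worker "gwN" derives a unique torch.distributed master port by
# adding its worker index to a base port, so parallel workers never collide.
import os
import re

def _unique_master_port(base_port: int = 29_500) -> int:
    worker_id = os.environ.get("PYTEST_XDIST_WORKER", "gw0")  # e.g. "gw3"
    return base_port + int(re.sub(r"^gw", "", worker_id))

print(_unique_master_port())  # 29500 outside xdist, 29500 + N under worker gwN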
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file


# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
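# The repo-id heuristic above, in isolation: any whitespace means the string is
# an inline prompt, otherwise it is treated as a Hub dataset repo id.
import re

def _is_inline_prompt(prompt_or_repo_id: str) -> bool:
    return re.search(r"\s", prompt_or_repo_id) is not None

assert _is_inline_prompt("Translate the following text.")
assert not _is_inline_prompt("huggingface-tools/default-prompts")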
from __future__ import annotations

from copy import deepcopy


class FenwickTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
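# Appended usage check (not part of the original file): point updates and
# prefix/range queries over arr = [1, 2, 3, 4, 5, 6, 7], verifiable by hand.
_f = FenwickTree(arr=[1, 2, 3, 4, 5, 6, 7])
assert _f.prefix(6) == 21      # 1 + 2 + 3 + 4 + 5 + 6
assert _f.query(2, 5) == 12    # 3 + 4 + 5
_f.update(0, 10)               # arr[0]: 1 -> 10
assert _f.get(0) == 10 and _f.prefix(6) == 30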
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(F'''{solution() = }''')
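# Appended sanity check (not part of the original file): 1406357289 is the
# canonical substring-divisible pandigital (windows 406, 063, 635, 357, 572,
# 728, 289 are divisible by 2, 3, 5, 7, 11, 13, 17 in turn), so the predicate
# above must accept it.
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))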
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
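# A miniature of the lazy-module pattern above (illustrative names, not the
# transformers implementation): names are declared up front in a mapping, and
# the heavy import only happens on first attribute access.
import importlib
import types

class _LazyDemo(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._demo_structure = import_structure

    def __getattr__(self, attr: str):
        # Only reached when normal attribute lookup fails.
        for submodule, names in self._demo_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)

_demo = _LazyDemo("demo", {"json": ["dumps"]})
assert _demo.dumps({"ok": 1}) == '{"ok": 1}'  # json imported lazily here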
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __lowerCAmelCase = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __lowerCAmelCase = {"facebook/blenderbot-3B": 1_2_8} class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : List[Any] = ["""input_ids""", """attention_mask"""] __SCREAMING_SNAKE_CASE : List[str] = BlenderbotTokenizer def __init__( self : Tuple , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : Tuple="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Dict="</s>" , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[str]=True , **__UpperCamelCase : int , ): super().__init__( __UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , ) _UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space: _UpperCAmelCase = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) ) _UpperCAmelCase = add_prefix_space _UpperCAmelCase = pre_tok_class(**__UpperCamelCase ) _UpperCAmelCase = add_prefix_space _UpperCAmelCase = "post_processor" _UpperCAmelCase = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase ) if tokenizer_component_instance: _UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _UpperCAmelCase = tuple(state["sep"] ) if "cls" in state: _UpperCAmelCase = tuple(state["cls"] ) _UpperCAmelCase = False if state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space: _UpperCAmelCase = add_prefix_space _UpperCAmelCase = True if state.get("trim_offsets" , __UpperCamelCase ) != trim_offsets: _UpperCAmelCase = trim_offsets _UpperCAmelCase = True if changes_to_apply: _UpperCAmelCase = getattr(__UpperCamelCase , state.pop("type" ) ) 
_UpperCAmelCase = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def UpperCAmelCase__ ( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ): _UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value _UpperCAmelCase = value def UpperCAmelCase__ ( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] ): _UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase ) def UpperCAmelCase__ ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ): _UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase ) def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ): _UpperCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ): _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : "Conversation" ): _UpperCAmelCase = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) _UpperCAmelCase = " ".join(__UpperCamelCase ) _UpperCAmelCase = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: _UpperCAmelCase = input_ids[-self.model_max_length :] logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __lowerCAmelCase = 1_6 __lowerCAmelCase = 3_2 def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase = 16 ) -> Tuple: _UpperCAmelCase = AutoTokenizer.from_pretrained("bert-base-cased" ) _UpperCAmelCase = load_dataset("glue" , "mrpc" ) def tokenize_function(_lowerCAmelCase ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( _lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(_lowerCAmelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( _lowerCAmelCase , padding="longest" , max_length=_lowerCAmelCase , pad_to_multiple_of=_lowerCAmelCase , return_tensors="pt" , ) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets["train"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) _UpperCAmelCase = DataLoader( tokenized_datasets["validation"] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __lowerCAmelCase = mocked_dataloaders # noqa: F811 def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> int: # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCAmelCase ) == "1": _UpperCAmelCase = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: _UpperCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir ) else: _UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config["lr"] _UpperCAmelCase = int(config["num_epochs"] ) _UpperCAmelCase = int(config["seed"] ) _UpperCAmelCase = int(config["batch_size"] ) set_seed(_lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase ) _UpperCAmelCase = evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation _UpperCAmelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE _UpperCAmelCase = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCAmelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() , lr=_lowerCAmelCase ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=_lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: _UpperCAmelCase = os.path.split(_lowerCAmelCase )[-1].split("." 
)[0] accelerator.init_trackers(_lowerCAmelCase , _lowerCAmelCase ) # Now we train the model for epoch in range(_lowerCAmelCase ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: _UpperCAmelCase = 0 for step, batch in enumerate(_lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase = model(**_lowerCAmelCase ) _UpperCAmelCase = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() _UpperCAmelCase = loss / gradient_accumulation_steps accelerator.backward(_lowerCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_lowerCAmelCase ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**_lowerCAmelCase ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=_lowerCAmelCase , references=_lowerCAmelCase , ) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , _lowerCAmelCase ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { "accuracy": eval_metric["accuracy"], "f1": eval_metric["f1"], "train_loss": total_loss.item() / len(_lowerCAmelCase ), "epoch": epoch, } , step=_lowerCAmelCase , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def __lowerCamelCase ( ) -> Dict: _UpperCAmelCase = argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=_lowerCAmelCase , default=_lowerCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) parser.add_argument( "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , ) parser.add_argument( "--project_dir" , type=_lowerCAmelCase , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(_lowerCAmelCase , _lowerCAmelCase ) if __name__ == "__main__": main()
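# Hedged launch sketch (assumed usage, mirroring the accelerate examples README;
# the two flags are defined in the argparse block above, the script filename is
# illustrative):
#
#   accelerate launch tracking_example.py --with_tracking --project_dir logs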
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __lowerCAmelCase = logging.get_logger(__name__) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase ) _UpperCAmelCase = downstream_dict["projector.weight"] _UpperCAmelCase = downstream_dict["projector.bias"] _UpperCAmelCase = downstream_dict["model.post_net.linear.weight"] _UpperCAmelCase = downstream_dict["model.post_net.linear.bias"] return model def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str: _UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase ) _UpperCAmelCase = downstream_dict["model.linear.weight"] _UpperCAmelCase = downstream_dict["model.linear.bias"] return model def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: _UpperCAmelCase = WavaVecaForXVector.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase ) _UpperCAmelCase = downstream_dict["connector.weight"] _UpperCAmelCase = downstream_dict["connector.bias"] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): _UpperCAmelCase = downstream_dict[ F'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] _UpperCAmelCase = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] _UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"] _UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"] _UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"] _UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"] _UpperCAmelCase = downstream_dict["objective.W"] return model @torch.no_grad() def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: _UpperCAmelCase = torch.load(_lowerCAmelCase , map_location="cpu" ) _UpperCAmelCase = checkpoint["Downstream"] _UpperCAmelCase = WavaVecaConfig.from_pretrained(_lowerCAmelCase ) _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained( _lowerCAmelCase , return_attention_mask=_lowerCAmelCase , do_normalize=_lowerCAmelCase ) _UpperCAmelCase = hf_config.architectures[0] if arch.endswith("ForSequenceClassification" ): _UpperCAmelCase = convert_classification(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) elif arch.endswith("ForAudioFrameClassification" ): _UpperCAmelCase = convert_diarization(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) elif arch.endswith("ForXVector" ): _UpperCAmelCase = convert_xvector(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: _UpperCAmelCase = checkpoint["Featurizer"]["weights"] hf_feature_extractor.save_pretrained(_lowerCAmelCase ) hf_model.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." 
) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") __lowerCAmelCase = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
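# Hedged CLI sketch (assumed usage; only the four flags come from the argparse
# block above, while the script name, model id and paths are illustrative):
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./classifier_config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model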
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)

    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0

    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(F'''{round(-1 * my_fir_sum):.1f}''')

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0

    # for each alpha (two in size) calculate entropy.
    for cha0 in my_alphas:
        for cha1 in my_alphas:
            sequence = cha0 + cha1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(F'''{round(-1 * my_sec_sum):.1f}''')

    # print the difference between them
    print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}''')


def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
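# Appended hand-checkable case (not part of the original file): for a text that
# is half 'a' and half 'b', the first-order entropy computed above is exactly
# -(0.5 * log2(0.5) + 0.5 * log2(0.5)) = 1 bit per character.
_counts = Counter("abab" * 8)
_total = sum(_counts.values())
_h1 = -sum((n / _total) * math.log2(n / _total) for n in _counts.values())
assert abs(_h1 - 1.0) < 1e-12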
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
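# Appended spot-checks (not part of the original file): the interleaved case
# "([)]" must fail even though every bracket type is numerically balanced, and
# an unclosed opener must leave the stack non-empty.
assert is_balanced("{[()]}")
assert not is_balanced("([)]")
assert not is_balanced("((")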
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
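# Appended worked example (not part of the original file): 0xAC = 172 =
# 0b10101100, returned as the plain integer 10101100; a sign prefix survives.
assert hex_to_bin("AC") == 10101100
assert hex_to_bin("-0x1a") == -11010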
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
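# Appended worked example (not part of the original file): for 2x + y = 11 and
# x - y = 1, D = -3, Dx = -12 and Dy = -9, hence (x, y) = (4.0, 3.0).
assert cramers_rule_2x2([2, 1, 11], [1, -1, 1]) == (4.0, 3.0)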
import heapq


def greedy_min_vertex_cover(graph: dict) -> set[int]:
    queue: list[list] = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)

    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker __lowerCAmelCase = "CompVis/stable-diffusion-v1-1" __lowerCAmelCase = "CompVis/stable-diffusion-v1-2" __lowerCAmelCase = "CompVis/stable-diffusion-v1-3" __lowerCAmelCase = "CompVis/stable-diffusion-v1-4" class __SCREAMING_SNAKE_CASE ( lowercase): def __init__( self : Optional[int] , __UpperCamelCase : AutoencoderKL , __UpperCamelCase : CLIPTextModel , __UpperCamelCase : CLIPTokenizer , __UpperCamelCase : UNetaDConditionModel , __UpperCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCamelCase : StableDiffusionSafetyChecker , __UpperCamelCase : CLIPImageProcessor , __UpperCamelCase : bool = True , ): super()._init_() _UpperCAmelCase = StableDiffusionPipeline.from_pretrained(__UpperCamelCase ) _UpperCAmelCase = StableDiffusionPipeline.from_pretrained(__UpperCamelCase ) _UpperCAmelCase = StableDiffusionPipeline.from_pretrained(__UpperCamelCase ) _UpperCAmelCase = StableDiffusionPipeline( vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , requires_safety_checker=__UpperCamelCase , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def UpperCAmelCase__ ( self : List[Any] ): return {k: getattr(self , __UpperCamelCase ) for k in self.config.keys() if not k.startswith("_" )} def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _UpperCAmelCase = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__UpperCamelCase ) def UpperCAmelCase__ ( self : Any ): self.enable_attention_slicing(__UpperCamelCase ) @torch.no_grad() def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 50 , __UpperCamelCase : float = 7.5 , __UpperCamelCase : Optional[Union[str, List[str]]] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase : int = 1 , **__UpperCamelCase : Optional[int] , ): return self.pipea( prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , ) @torch.no_grad() def UpperCAmelCase__ ( self : str , 
__UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 50 , __UpperCamelCase : float = 7.5 , __UpperCamelCase : Optional[Union[str, List[str]]] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase : int = 1 , **__UpperCamelCase : Optional[Any] , ): return self.pipea( prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , ) @torch.no_grad() def UpperCAmelCase__ ( self : str , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 50 , __UpperCamelCase : float = 7.5 , __UpperCamelCase : Optional[Union[str, List[str]]] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase : int = 1 , **__UpperCamelCase : Optional[Any] , ): return self.pipea( prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , ) @torch.no_grad() def UpperCAmelCase__ ( self : int , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 50 , __UpperCamelCase : float = 7.5 , __UpperCamelCase : Optional[Union[str, List[str]]] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase : int = 1 , **__UpperCamelCase : Any , ): return self.pipea( prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , ) @torch.no_grad() def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int 
= 512 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 50 , __UpperCamelCase : float = 7.5 , __UpperCamelCase : Optional[Union[str, List[str]]] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase : int = 1 , **__UpperCamelCase : Any , ): _UpperCAmelCase = "cuda" if torch.cuda.is_available() else "cpu" self.to(__UpperCamelCase ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' ) # Get first result from Stable Diffusion Checkpoint v1.1 _UpperCAmelCase = self.textaimg_sda_a( prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.2 _UpperCAmelCase = self.textaimg_sda_a( prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.3 _UpperCAmelCase = self.textaimg_sda_a( prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , ) # Get first result from Stable Diffusion Checkpoint v1.4 _UpperCAmelCase = self.textaimg_sda_a( prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
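# Hedged usage sketch (not from the file): a pipeline like the one above is
# typically distributed as a diffusers community pipeline and loaded as below.
# Checkpoint downloads and a CUDA device are assumed, and the custom_pipeline
# name is an assumption based on the file's purpose.
#
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       custom_pipeline="stable_diffusion_comparison",
#   ).to("cuda")
#   out = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
#   images = out.images  # one image per v1.x checkpoint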
684
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : @staticmethod def UpperCAmelCase__ ( *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ): pass @is_pipeline_test @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase): __SCREAMING_SNAKE_CASE : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ): _UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) _UpperCAmelCase = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ): _UpperCAmelCase = vqa_pipeline(__UpperCamelCase , top_k=1 ) self.assertEqual( __UpperCamelCase , [ [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}], [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}], ] , ) @require_torch def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) _UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png" _UpperCAmelCase = "How many cats are there?" _UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question="How many cats are there?" , top_k=2 ) self.assertEqual( __UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] ) _UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( __UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] ) @slow @require_torch def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" ) _UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png" _UpperCAmelCase = "How many cats are there?" _UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) _UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) _UpperCAmelCase = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , ) @require_tf @unittest.skip("Visual question answering not implemented in TF" ) def UpperCAmelCase__ ( self : Optional[int] ): pass
684
1
from ..utils import DummyObject, requires_backends class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Any = ["""flax"""] def __init__( self : Optional[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : int ): requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : Tuple , **__UpperCamelCase : Union[str, Any] ): requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : Tuple , **__UpperCamelCase : List[str] ): requires_backends(cls , ["flax"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : int = ["""flax"""] def __init__( self : Any , *__UpperCamelCase : str , **__UpperCamelCase : Optional[int] ): requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : List[str] ): requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : int , *__UpperCamelCase : int , **__UpperCamelCase : int ): requires_backends(cls , ["flax"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : List[str] = ["""flax"""] def __init__( self : Union[str, Any] , *__UpperCamelCase : str , **__UpperCamelCase : List[str] ): requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : str , *__UpperCamelCase : Any , **__UpperCamelCase : List[str] ): requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] , *__UpperCamelCase : Any , **__UpperCamelCase : Optional[int] ): requires_backends(cls , ["flax"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Dict = ["""flax"""] def __init__( self : Union[str, Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[Any] ): requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : Tuple , *__UpperCamelCase : str , **__UpperCamelCase : Union[str, Any] ): requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : Tuple , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Any ): requires_backends(cls , ["flax"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Union[str, Any] = ["""flax"""] def __init__( self : str , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Union[str, Any] ): requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : List[str] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : int ): requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : Dict , **__UpperCamelCase : List[str] ): requires_backends(cls , ["flax"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Tuple = ["""flax"""] def __init__( self : List[str] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : str ): requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : List[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : str ): requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : Tuple , *__UpperCamelCase : List[str] , **__UpperCamelCase : Tuple ): requires_backends(cls , ["flax"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Union[str, Any] = ["""flax"""] def __init__( self : Optional[int] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Optional[Any] ): requires_backends(self , ["flax"] ) @classmethod def 
UpperCAmelCase__ ( cls : str , *__UpperCamelCase : int , **__UpperCamelCase : Optional[Any] ): requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : Dict , **__UpperCamelCase : Any ): requires_backends(cls , ["flax"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Optional[Any] = ["""flax"""] def __init__( self : Any , *__UpperCamelCase : int , **__UpperCamelCase : str ): requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : List[Any] , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ): requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : str ): requires_backends(cls , ["flax"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Tuple = ["""flax"""] def __init__( self : List[str] , *__UpperCamelCase : Any , **__UpperCamelCase : Tuple ): requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : str , *__UpperCamelCase : str , **__UpperCamelCase : Any ): requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Union[str, Any] ): requires_backends(cls , ["flax"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : int = ["""flax"""] def __init__( self : Any , *__UpperCamelCase : List[str] , **__UpperCamelCase : List[str] ): requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : int , *__UpperCamelCase : Any , **__UpperCamelCase : List[Any] ): requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : List[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] ): requires_backends(cls , ["flax"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : List[Any] = ["""flax"""] def __init__( self : Dict , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] ): requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : str , **__UpperCamelCase : List[str] ): requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : str , *__UpperCamelCase : Dict , **__UpperCamelCase : Any ): requires_backends(cls , ["flax"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Dict = ["""flax"""] def __init__( self : int , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] ): requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : List[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Any ): requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ): requires_backends(cls , ["flax"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Tuple = ["""flax"""] def __init__( self : int , *__UpperCamelCase : Tuple , **__UpperCamelCase : Tuple ): requires_backends(self , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : int , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ): requires_backends(cls , ["flax"] ) @classmethod def UpperCAmelCase__ ( cls : List[str] , *__UpperCamelCase : Dict , **__UpperCamelCase : Union[str, Any] ): requires_backends(cls , ["flax"] )
684
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
684
1
from ..utils import DummyObject, requires_backends class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Any = ["""torch"""] def __init__( self : int , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Dict , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Any ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Dict , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Union[str, Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : str = ["""torch"""] def __init__( self : Optional[Any] , *__UpperCamelCase : int , **__UpperCamelCase : str ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] , *__UpperCamelCase : str , **__UpperCamelCase : List[str] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[str] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : int = ["""torch"""] def __init__( self : List[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : str ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : int , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Tuple ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Optional[int] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : int = ["""torch"""] def __init__( self : Optional[int] , *__UpperCamelCase : Tuple , **__UpperCamelCase : List[str] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Dict , *__UpperCamelCase : Any , **__UpperCamelCase : Optional[int] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : int ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : str = ["""torch"""] def __init__( self : Union[str, Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Any ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Any ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : str ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Dict = ["""torch"""] def __init__( self : Union[str, Any] , *__UpperCamelCase : str , **__UpperCamelCase : Tuple ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : str ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Tuple , *__UpperCamelCase : Tuple , **__UpperCamelCase : Dict ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : List[str] = ["""torch"""] def __init__( self : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : int ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( 
cls : Any , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : int , *__UpperCamelCase : List[str] , **__UpperCamelCase : Optional[Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Union[str, Any] = ["""torch"""] def __init__( self : List[Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : Any ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Dict , *__UpperCamelCase : Tuple , **__UpperCamelCase : Dict ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Dict , *__UpperCamelCase : Dict , **__UpperCamelCase : List[Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Optional[Any] = ["""torch"""] def __init__( self : int , *__UpperCamelCase : int , **__UpperCamelCase : int ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : str , *__UpperCamelCase : int , **__UpperCamelCase : Any ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Tuple , *__UpperCamelCase : Dict , **__UpperCamelCase : Any ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : List[str] = ["""torch"""] def __init__( self : int , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Any ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Any ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : Tuple , **__UpperCamelCase : List[str] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Dict = ["""torch"""] def __init__( self : Any , *__UpperCamelCase : Dict , **__UpperCamelCase : int ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : Any , **__UpperCamelCase : int ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ): requires_backends(cls , ["torch"] ) def __lowerCamelCase ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(_lowerCAmelCase , ["torch"] ) def __lowerCamelCase ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Any: requires_backends(_lowerCAmelCase , ["torch"] ) def __lowerCamelCase ( *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(_lowerCAmelCase , ["torch"] ) def __lowerCamelCase ( *_lowerCAmelCase , **_lowerCAmelCase ) -> int: requires_backends(_lowerCAmelCase , ["torch"] ) def __lowerCamelCase ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[int]: requires_backends(_lowerCAmelCase , ["torch"] ) def __lowerCamelCase ( *_lowerCAmelCase , **_lowerCAmelCase ) -> List[Any]: requires_backends(_lowerCAmelCase , ["torch"] ) def __lowerCamelCase ( *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: requires_backends(_lowerCAmelCase , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Dict = ["""torch"""] def __init__( self : Optional[int] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : str ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Dict , *__UpperCamelCase : str , **__UpperCamelCase : 
Any ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Optional[Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : str = ["""torch"""] def __init__( self : Any , *__UpperCamelCase : List[str] , **__UpperCamelCase : Any ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Dict , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Optional[int] = ["""torch"""] def __init__( self : Optional[Any] , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : int , **__UpperCamelCase : List[str] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Dict ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : str = ["""torch"""] def __init__( self : List[str] , *__UpperCamelCase : Tuple , **__UpperCamelCase : Optional[Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[str] , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Dict = ["""torch"""] def __init__( self : Tuple , *__UpperCamelCase : str , **__UpperCamelCase : Tuple ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[Any] , *__UpperCamelCase : int , **__UpperCamelCase : int ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : List[str] = ["""torch"""] def __init__( self : Optional[int] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Any ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[Any] , *__UpperCamelCase : Any , **__UpperCamelCase : Tuple ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : str , **__UpperCamelCase : Tuple ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : List[str] = ["""torch"""] def __init__( self : Dict , *__UpperCamelCase : Any , **__UpperCamelCase : str ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Tuple ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : List[Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Dict = ["""torch"""] def __init__( self : 
Optional[Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : Union[str, Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : str , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Any ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : int ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Optional[int] = ["""torch"""] def __init__( self : Any , *__UpperCamelCase : Tuple , **__UpperCamelCase : Dict ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : str , *__UpperCamelCase : List[Any] , **__UpperCamelCase : int ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[str] , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Any = ["""torch"""] def __init__( self : List[Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : str ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : str , *__UpperCamelCase : Any , **__UpperCamelCase : int ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Optional[int] = ["""torch"""] def __init__( self : Optional[int] , *__UpperCamelCase : Dict , **__UpperCamelCase : Tuple ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : Dict , **__UpperCamelCase : Dict ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[Any] , *__UpperCamelCase : Tuple , **__UpperCamelCase : int ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Tuple = ["""torch"""] def __init__( self : List[str] , *__UpperCamelCase : Tuple , **__UpperCamelCase : Optional[Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Dict , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Union[str, Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] , *__UpperCamelCase : List[str] , **__UpperCamelCase : Optional[int] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Optional[int] = ["""torch"""] def __init__( self : List[str] , *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Union[str, Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : str ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Dict = ["""torch"""] def __init__( self : List[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Union[str, Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[Any] , *__UpperCamelCase : int , **__UpperCamelCase : Optional[int] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : int , 
*__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Dict ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Union[str, Any] = ["""torch"""] def __init__( self : Tuple , *__UpperCamelCase : List[str] , **__UpperCamelCase : Tuple ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : Tuple , **__UpperCamelCase : str ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[Any] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : str ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Any = ["""torch"""] def __init__( self : Optional[int] , *__UpperCamelCase : Any , **__UpperCamelCase : Union[str, Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[str] , *__UpperCamelCase : Any , **__UpperCamelCase : List[str] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Dict , *__UpperCamelCase : Any , **__UpperCamelCase : str ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Optional[Any] = ["""torch"""] def __init__( self : Any , *__UpperCamelCase : str , **__UpperCamelCase : Union[str, Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] , *__UpperCamelCase : Tuple , **__UpperCamelCase : List[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[str] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : str ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Dict = ["""torch"""] def __init__( self : Optional[int] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] , *__UpperCamelCase : List[str] , **__UpperCamelCase : Dict ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Optional[int] = ["""torch"""] def __init__( self : Dict , *__UpperCamelCase : List[str] , **__UpperCamelCase : Optional[Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[str] , *__UpperCamelCase : Any , **__UpperCamelCase : List[str] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : int , *__UpperCamelCase : Tuple , **__UpperCamelCase : str ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Tuple = ["""torch"""] def __init__( self : Tuple , *__UpperCamelCase : Dict , **__UpperCamelCase : List[str] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : int , *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Union[str, Any] = ["""torch"""] def __init__( self : List[str] , *__UpperCamelCase : Tuple , **__UpperCamelCase : Optional[Any] ): 
requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : int , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : int ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : int , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Union[str, Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Optional[Any] = ["""torch"""] def __init__( self : str , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : Tuple , **__UpperCamelCase : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Tuple , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : str = ["""torch"""] def __init__( self : str , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[str] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : str , *__UpperCamelCase : List[Any] , **__UpperCamelCase : str ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Any ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Union[str, Any] = ["""torch"""] def __init__( self : List[Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : List[Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] , *__UpperCamelCase : Tuple , **__UpperCamelCase : int ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Optional[int] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : str = ["""torch"""] def __init__( self : List[Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : str ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[str] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : int ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[int] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : List[str] = ["""torch"""] def __init__( self : Any , *__UpperCamelCase : str , **__UpperCamelCase : Dict ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : List[str] , **__UpperCamelCase : Dict ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : Tuple , **__UpperCamelCase : Dict ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : List[Any] = ["""torch"""] def __init__( self : List[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : int ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Tuple , *__UpperCamelCase : Tuple , **__UpperCamelCase : Union[str, Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Tuple , *__UpperCamelCase : Union[str, Any] , 
**__UpperCamelCase : int ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Dict = ["""torch"""] def __init__( self : Dict , *__UpperCamelCase : str , **__UpperCamelCase : int ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[str] , *__UpperCamelCase : Dict , **__UpperCamelCase : Dict ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : int , **__UpperCamelCase : Optional[Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Dict = ["""torch"""] def __init__( self : Dict , *__UpperCamelCase : str , **__UpperCamelCase : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[Any] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Dict ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[str] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Any ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Union[str, Any] = ["""torch"""] def __init__( self : List[str] , *__UpperCamelCase : int , **__UpperCamelCase : int ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : List[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Tuple ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : int = ["""torch"""] def __init__( self : Union[str, Any] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : int ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : int , *__UpperCamelCase : List[str] , **__UpperCamelCase : Optional[Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Any ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Optional[int] = ["""torch"""] def __init__( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Optional[int] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Dict , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : str ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : int , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Tuple ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Optional[Any] = ["""torch"""] def __init__( self : Any , *__UpperCamelCase : List[Any] , **__UpperCamelCase : List[str] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Union[str, Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[str] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : int ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Optional[Any] = ["""torch"""] def __init__( self : Tuple , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Optional[int] ): requires_backends(self , ["torch"] ) 
@classmethod def UpperCAmelCase__ ( cls : int , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Union[str, Any] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[Any] , *__UpperCamelCase : Tuple , **__UpperCamelCase : Tuple ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Dict = ["""torch"""] def __init__( self : List[Any] , *__UpperCamelCase : str , **__UpperCamelCase : List[Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : Any , **__UpperCamelCase : Any ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : Dict , **__UpperCamelCase : List[str] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Dict = ["""torch"""] def __init__( self : List[Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Tuple ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[Any] , *__UpperCamelCase : Tuple , **__UpperCamelCase : Optional[Any] ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Any = ["""torch"""] def __init__( self : Optional[Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Any ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : int ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[str] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : int ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Tuple = ["""torch"""] def __init__( self : Optional[int] , *__UpperCamelCase : List[str] , **__UpperCamelCase : Any ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Any , *__UpperCamelCase : Tuple , **__UpperCamelCase : Dict ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Optional[int] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : Any ): requires_backends(cls , ["torch"] ) class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Optional[int] = ["""torch"""] def __init__( self : Optional[Any] , *__UpperCamelCase : Tuple , **__UpperCamelCase : Union[str, Any] ): requires_backends(self , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : List[str] ): requires_backends(cls , ["torch"] ) @classmethod def UpperCAmelCase__ ( cls : Tuple , *__UpperCamelCase : List[str] , **__UpperCamelCase : List[str] ): requires_backends(cls , ["torch"] )
684
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : str = (UniPCMultistepScheduler,) __SCREAMING_SNAKE_CASE : Dict = (("""num_inference_steps""", 25),) def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Any ): _UpperCAmelCase = { "num_train_timesteps": 1_000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "solver_type": "bh2", } config.update(**__UpperCamelCase ) return config def UpperCAmelCase__ ( self : int , __UpperCamelCase : Any=0 , **__UpperCamelCase : Any ): _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase = sample, sample for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any]=0 , **__UpperCamelCase : List[Any] ): _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not 
identical" def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Dict=None , **__UpperCamelCase : Optional[Any] ): if scheduler is None: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 10 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase ) for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ): scheduler.set_timesteps(__UpperCamelCase ) elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ): _UpperCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] _UpperCAmelCase = scheduler.timesteps[5] _UpperCAmelCase = scheduler.timesteps[6] _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCAmelCase__ ( self : Union[str, Any] ): # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2464 ) < 1e-3 _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2464 ) < 1e-3 def UpperCAmelCase__ ( self : str ): for timesteps in [25, 50, 100, 999, 1_000]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def UpperCAmelCase__ ( self : int ): self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def 
UpperCAmelCase__ ( self : int ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def UpperCAmelCase__ ( self : int ): for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , ) _UpperCAmelCase = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers" def UpperCAmelCase__ ( self : Optional[int] ): self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2464 ) < 1e-3 def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = self.full_loop(prediction_type="v_prediction" ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.1014 ) < 1e-3 def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 10 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample assert sample.dtype == torch.floataa def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Optional[Any] ): for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
684
1
import string import numpy def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> int: return b if a == 0 else greatest_common_divisor(b % a , _lowerCAmelCase ) class __SCREAMING_SNAKE_CASE : __SCREAMING_SNAKE_CASE : Union[str, Any] = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) __SCREAMING_SNAKE_CASE : Optional[Any] = numpy.vectorize(lambda lowercase: x % 36) __SCREAMING_SNAKE_CASE : Union[str, Any] = numpy.vectorize(lowercase) def __init__( self : str , __UpperCamelCase : numpy.ndarray ): _UpperCAmelCase = self.modulus(__UpperCamelCase ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key _UpperCAmelCase = encrypt_key.shape[0] def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : str ): return self.key_string.index(__UpperCamelCase ) def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : int ): return self.key_string[round(__UpperCamelCase )] def UpperCAmelCase__ ( self : int ): _UpperCAmelCase = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: _UpperCAmelCase = det % len(self.key_string ) _UpperCAmelCase = len(self.key_string ) if greatest_common_divisor(__UpperCamelCase , len(self.key_string ) ) != 1: _UpperCAmelCase = ( F'''determinant modular {req_l} of encryption key({det}) ''' F'''is not co prime w.r.t {req_l}.\nTry another key.''' ) raise ValueError(__UpperCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : str ): _UpperCAmelCase = [char for char in text.upper() if char in self.key_string] _UpperCAmelCase = chars[-1] while len(__UpperCamelCase ) % self.break_key != 0: chars.append(__UpperCamelCase ) return "".join(__UpperCamelCase ) def UpperCAmelCase__ ( self : Any , __UpperCamelCase : str ): _UpperCAmelCase = self.process_text(text.upper() ) _UpperCAmelCase = "" for i in range(0 , len(__UpperCamelCase ) - self.break_key + 1 , self.break_key ): _UpperCAmelCase = text[i : i + self.break_key] _UpperCAmelCase = [self.replace_letters(__UpperCamelCase ) for char in batch] _UpperCAmelCase = numpy.array([vec] ).T _UpperCAmelCase = self.modulus(self.encrypt_key.dot(__UpperCamelCase ) ).T.tolist()[ 0 ] _UpperCAmelCase = "".join( self.replace_digits(__UpperCamelCase ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: _UpperCAmelCase = det % len(self.key_string ) _UpperCAmelCase = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: _UpperCAmelCase = i break _UpperCAmelCase = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(__UpperCamelCase ) ) def UpperCAmelCase__ ( self : int , __UpperCamelCase : str ): _UpperCAmelCase = self.make_decrypt_key() _UpperCAmelCase = self.process_text(text.upper() ) _UpperCAmelCase = "" for i in range(0 , len(__UpperCamelCase ) - self.break_key + 1 , self.break_key ): _UpperCAmelCase = text[i : i + self.break_key] _UpperCAmelCase = [self.replace_letters(__UpperCamelCase ) for char in batch] _UpperCAmelCase = numpy.array([vec] ).T _UpperCAmelCase = self.modulus(decrypt_key.dot(__UpperCamelCase ) ).T.tolist()[0] _UpperCAmelCase = "".join( self.replace_digits(__UpperCamelCase ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def __lowerCamelCase ( ) -> None: _UpperCAmelCase = 
int(input("Enter the order of the encryption key: " ) ) _UpperCAmelCase = [] print("Enter each row of the encryption key with space separated integers" ) for _ in range(_lowerCAmelCase ): _UpperCAmelCase = [int(_lowerCAmelCase ) for x in input().split()] hill_matrix.append(_lowerCAmelCase ) _UpperCAmelCase = HillCipher(numpy.array(_lowerCAmelCase ) ) print("Would you like to encrypt or decrypt some text? (1 or 2)" ) _UpperCAmelCase = input("\n1. Encrypt\n2. Decrypt\n" ) if option == "1": _UpperCAmelCase = input("What text would you like to encrypt?: " ) print("Your encrypted text is:" ) print(hc.encrypt(_lowerCAmelCase ) ) elif option == "2": _UpperCAmelCase = input("What text would you like to decrypt?: " ) print("Your decrypted text is:" ) print(hc.decrypt(_lowerCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
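A quick non-interactive sanity check of the cipher above; this is an editorial sketch rather than part of the source, and it assumes the upstream method names encrypt/decrypt/process_text that main already calls, plus a 2x2 key whose determinant, 7, is coprime with 36 as check_determinant requires:

import numpy

hc = HillCipher(numpy.array([[2, 5], [1, 6]]))  # det = 7, gcd(7, 36) = 1, so the key is valid

plain = "Testing hill cipher"
cipher = hc.encrypt(plain)
# decrypt should invert encrypt up to preprocessing: uppercasing, dropping
# characters outside the 36-character alphabet (the space here), and padding
# with the last character to a multiple of the key size -> "TESTINGHILLCIPHERR"
assert hc.decrypt(cipher) == hc.process_text(plain)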
684
import math class __SCREAMING_SNAKE_CASE : def __init__( self : Union[str, Any] , __UpperCamelCase : List[Any]=0 ): # a graph with Node 0,1,...,N-1 _UpperCAmelCase = n _UpperCAmelCase = [ [math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase ) ] # adjacency matrix for weight _UpperCAmelCase = [ [math.inf for j in range(0 , __UpperCamelCase )] for i in range(0 , __UpperCamelCase ) ] # dp[i][j] stores minimum distance from i to j def UpperCAmelCase__ ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ): _UpperCAmelCase = w def UpperCAmelCase__ ( self : Dict ): for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): _UpperCAmelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any ): return self.dp[u][v] if __name__ == "__main__": __lowerCAmelCase = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 1_0) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 1_0) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
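The Graph class above reports only shortest distances; recovering the actual routes needs a next-hop table filled in during the same triple loop. The following is a standalone sketch, independent of the class, taking a plain weight matrix with math.inf for missing edges; note that it also zeroes the diagonal, which the constructor above leaves at math.inf, and that it does not detect negative cycles:

import math

def floyd_warshall_with_paths(weights):
    # weights[i][j] is the edge weight i -> j, or math.inf if there is no edge.
    n = len(weights)
    dist = [row[:] for row in weights]
    nxt = [[j if weights[i][j] != math.inf else None for j in range(n)] for i in range(n)]
    for i in range(n):
        dist[i][i] = 0
        nxt[i][i] = i
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
                    nxt[i][j] = nxt[i][k]  # the route i -> j now starts toward k
    return dist, nxt

def reconstruct_path(nxt, u, v):
    # Follow next hops from u until v; an empty list means v is unreachable.
    if nxt[u][v] is None:
        return []
    path = [u]
    while u != v:
        u = nxt[u][v]
        path.append(u)
    return path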
684
1
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: if b == 0: return 1 if (b % 2) == 0: return actual_power(_lowerCAmelCase , int(b / 2 ) ) * actual_power(_lowerCAmelCase , int(b / 2 ) ) else: return a * actual_power(_lowerCAmelCase , int(b / 2 ) ) * actual_power(_lowerCAmelCase , int(b / 2 ) ) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> float: if b < 0: return 1 / actual_power(_lowerCAmelCase , _lowerCAmelCase ) return actual_power(_lowerCAmelCase , _lowerCAmelCase ) if __name__ == "__main__": print(power(-2, -3))
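The divide-and-conquer version above recomputes the half-power twice at every level, so it still performs O(b) multiplications. Computing the half once and squaring it brings that down to O(log b); a minimal standalone sketch, with the negative-exponent handling folded in:

def fast_power(a: float, b: int) -> float:
    # Exponentiation by squaring: one recursive call per level.
    if b == 0:
        return 1
    if b < 0:
        return 1 / fast_power(a, -b)
    half = fast_power(a, b // 2)  # reuse a ** (b // 2) instead of recomputing it
    if b % 2 == 0:
        return half * half
    return a * half * half

print(fast_power(-2, -3))  # -0.125, matching the call above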
684
import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : Dict = VQModel __SCREAMING_SNAKE_CASE : Optional[int] = """sample""" @property def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[int]=(32, 32) ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase ) return {"sample": image} @property def UpperCAmelCase__ ( self : Tuple ): return (3, 32, 32) @property def UpperCAmelCase__ ( self : str ): return (3, 32, 32) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase__ ( self : Dict ): pass def UpperCAmelCase__ ( self : str ): pass def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(__UpperCamelCase ) _UpperCAmelCase = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" ) model.to(__UpperCamelCase ).eval() torch.manual_seed(0 ) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0 ) _UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size ) _UpperCAmelCase = image.to(__UpperCamelCase ) with torch.no_grad(): _UpperCAmelCase = model(__UpperCamelCase ).sample _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] ) # fmt: on self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
684
1
import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]: # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def __lowerCamelCase ( ) -> Any: with parallel_backend("spark" ): assert ParallelBackendConfig.backend_name == "spark" _UpperCAmelCase = [1, 2, 3] with pytest.raises(_lowerCAmelCase ): with parallel_backend("unsupported backend" ): map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=2 ) with pytest.raises(_lowerCAmelCase ): with parallel_backend("unsupported backend" ): map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("num_proc" , [2, -1] ) def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]: _UpperCAmelCase = [1, 2] _UpperCAmelCase = {"a": 1, "b": 2} _UpperCAmelCase = {"a": [1, 2], "b": [3, 4]} _UpperCAmelCase = {"a": {"1": 1}, "b": 2} _UpperCAmelCase = {"a": 1, "b": 2, "c": 3, "d": 4} _UpperCAmelCase = [2, 3] _UpperCAmelCase = {"a": 2, "b": 3} _UpperCAmelCase = {"a": [2, 3], "b": [4, 5]} _UpperCAmelCase = {"a": {"1": 2}, "b": 3} _UpperCAmelCase = {"a": 2, "b": 3, "c": 4, "d": 5} with parallel_backend("spark" ): assert map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) == expected_map_nested_sa assert map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) == expected_map_nested_sa assert map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) == expected_map_nested_sa assert map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) == expected_map_nested_sa assert map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) == expected_map_nested_sa
684
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
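One caveat for the fetcher above: requests.get(...).json() will parse an error payload just as happily as a result. A slightly hardened sketch (standard requests API only):

response = requests.get(_NEWS_API + bbc_news_api_key, timeout=10)
response.raise_for_status()  # raises requests.HTTPError on 4xx/5xx
bbc_news_page = response.json()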
684
1
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
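This is the usual optional-dependency guard: when torch/transformers are missing or too old, dummy placeholder classes are imported so the failure happens at use time rather than import time. A stripped-down sketch of the same pattern for an arbitrary package (somepkg is a placeholder name):

try:
    import somepkg  # noqa: F401
    _HAS_SOMEPKG = True
except ImportError:
    _HAS_SOMEPKG = False


def require_somepkg() -> None:
    if not _HAS_SOMEPKG:
        raise ImportError("This feature requires `somepkg`; please install it first.")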
684
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase = 10 def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = [1, 2, 3, 4] _UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase ) def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase ) def UpperCAmelCase__ ( self : int ): _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this." _UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase ) self.assertEqual(__UpperCamelCase , [] ) def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = "" _UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase ) self.assertEqual(__UpperCamelCase , [] ) self.assertEqual(__UpperCamelCase , [] ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = ( "It was the year of Our Lord one thousand seven hundred and " "seventy-five\n\nSpiritual revelations were conceded to England " "at that favoured period, as at this.\n@highlight\n\nIt was the best of times" ) _UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase ) _UpperCAmelCase = [ "It was the year of Our Lord one thousand seven hundred and seventy-five.", "Spiritual revelations were conceded to England at that favoured period, as at this.", ] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = ["It was the best of times."] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = torch.tensor([1, 2, 3, 4] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(__UpperCamelCase , 0 ).numpy() , expected.numpy() ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCamelCase , 23 ).numpy() , expected.numpy() ) def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCamelCase , 1 ).numpy() , expected.numpy() ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = 101 _UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) _UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) _UpperCAmelCase = compute_token_type_ids(__UpperCamelCase , __UpperCamelCase ) np.testing.assert_array_equal(__UpperCamelCase , __UpperCamelCase )
684
1
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]: _UpperCAmelCase = SwinvaConfig() _UpperCAmelCase = swinva_name.split("_" ) _UpperCAmelCase = name_split[1] if "to" in name_split[3]: _UpperCAmelCase = int(name_split[3][-3:] ) else: _UpperCAmelCase = int(name_split[3] ) if "to" in name_split[2]: _UpperCAmelCase = int(name_split[2][-2:] ) else: _UpperCAmelCase = int(name_split[2][6:] ) if model_size == "tiny": _UpperCAmelCase = 96 _UpperCAmelCase = (2, 2, 6, 2) _UpperCAmelCase = (3, 6, 12, 24) elif model_size == "small": _UpperCAmelCase = 96 _UpperCAmelCase = (2, 2, 18, 2) _UpperCAmelCase = (3, 6, 12, 24) elif model_size == "base": _UpperCAmelCase = 128 _UpperCAmelCase = (2, 2, 18, 2) _UpperCAmelCase = (4, 8, 16, 32) else: _UpperCAmelCase = 192 _UpperCAmelCase = (2, 2, 18, 2) _UpperCAmelCase = (6, 12, 24, 48) if "to" in swinva_name: _UpperCAmelCase = (12, 12, 12, 6) if ("22k" in swinva_name) and ("to" not in swinva_name): _UpperCAmelCase = 21_841 _UpperCAmelCase = "huggingface/label-files" _UpperCAmelCase = "imagenet-22k-id2label.json" _UpperCAmelCase = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) ) _UpperCAmelCase = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} else: _UpperCAmelCase = 1_000 _UpperCAmelCase = "huggingface/label-files" _UpperCAmelCase = "imagenet-1k-id2label.json" _UpperCAmelCase = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) ) _UpperCAmelCase = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} _UpperCAmelCase = idalabel _UpperCAmelCase = {v: k for k, v in idalabel.items()} _UpperCAmelCase = img_size _UpperCAmelCase = num_classes _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = num_heads _UpperCAmelCase = window_size return config def __lowerCamelCase ( _lowerCAmelCase ) -> int: if "patch_embed.proj" in name: _UpperCAmelCase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: _UpperCAmelCase = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: _UpperCAmelCase = "encoder." + name if "attn.proj" in name: _UpperCAmelCase = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: _UpperCAmelCase = name.replace("attn" , "attention.self" ) if "norm1" in name: _UpperCAmelCase = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: _UpperCAmelCase = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: _UpperCAmelCase = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: _UpperCAmelCase = name.replace("mlp.fc2" , "output.dense" ) if "q_bias" in name: _UpperCAmelCase = name.replace("q_bias" , "query.bias" ) if "k_bias" in name: _UpperCAmelCase = name.replace("k_bias" , "key.bias" ) if "v_bias" in name: _UpperCAmelCase = name.replace("v_bias" , "value.bias" ) if "cpb_mlp" in name: _UpperCAmelCase = name.replace("cpb_mlp" , "continuous_position_bias_mlp" ) if name == "norm.weight": _UpperCAmelCase = "layernorm.weight" if name == "norm.bias": _UpperCAmelCase = "layernorm.bias" if "head" in name: _UpperCAmelCase = name.replace("head" , "classifier" ) else: _UpperCAmelCase = "swinv2." 
+ name return name def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Any: for key in orig_state_dict.copy().keys(): _UpperCAmelCase = orig_state_dict.pop(_lowerCAmelCase ) if "mask" in key: continue elif "qkv" in key: _UpperCAmelCase = key.split("." ) _UpperCAmelCase = int(key_split[1] ) _UpperCAmelCase = int(key_split[3] ) _UpperCAmelCase = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: _UpperCAmelCase = val[:dim, :] _UpperCAmelCase = val[dim : dim * 2, :] _UpperCAmelCase = val[-dim:, :] else: _UpperCAmelCase = val[:dim] _UpperCAmelCase = val[ dim : dim * 2 ] _UpperCAmelCase = val[-dim:] else: _UpperCAmelCase = val return orig_state_dict def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: _UpperCAmelCase = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase ) timm_model.eval() _UpperCAmelCase = get_swinva_config(_lowerCAmelCase ) _UpperCAmelCase = SwinvaForImageClassification(_lowerCAmelCase ) model.eval() _UpperCAmelCase = convert_state_dict(timm_model.state_dict() , _lowerCAmelCase ) model.load_state_dict(_lowerCAmelCase ) _UpperCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg" _UpperCAmelCase = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) ) _UpperCAmelCase = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) _UpperCAmelCase = image_processor(images=_lowerCAmelCase , return_tensors="pt" ) _UpperCAmelCase = timm_model(inputs["pixel_values"] ) _UpperCAmelCase = model(**_lowerCAmelCase ).logits assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 ) print(F'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_lowerCAmelCase ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_lowerCAmelCase ) model.push_to_hub( repo_path_or_name=Path(_lowerCAmelCase , _lowerCAmelCase ) , organization="nandwalritik" , commit_message="Add model" , ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swinv2_name", default="swinv2_tiny_patch4_window8_256", type=str, help="Name of the Swinv2 timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) __lowerCAmelCase = parser.parse_args() convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
684
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    # solve P = V * I for whichever of the three quantities was passed as 0
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
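The function simply rearranges P = V * I to solve for whichever quantity was passed as 0 (the name electric_power is my reconstruction; the dump had its identifier mangled). A quick usage sketch:

print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
print(electric_power(voltage=2, current=2, power=0))  # result(name='power', value=4.0)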
684
1
import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : int = ["""input_ids""", """attention_mask"""] def __init__( self : str , __UpperCamelCase : List[Any]="</s>" , __UpperCamelCase : str="<unk>" , __UpperCamelCase : str="<pad>" , __UpperCamelCase : Optional[Any]=125 , __UpperCamelCase : Any=None , **__UpperCamelCase : Dict , ): # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: _UpperCAmelCase = [F'''<extra_id_{i}>''' for i in range(__UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens _UpperCAmelCase = len(set(filter(lambda __UpperCamelCase : bool("extra_id" in str(__UpperCamelCase ) ) , __UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the" " extra_ids tokens" ) _UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else pad_token _UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else eos_token _UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else unk_token super().__init__( eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , extra_ids=__UpperCamelCase , additional_special_tokens=__UpperCamelCase , **__UpperCamelCase , ) _UpperCAmelCase = extra_ids _UpperCAmelCase = 2**8 # utf is 8 bits # define special tokens dict _UpperCAmelCase = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } _UpperCAmelCase = len(self.special_tokens_encoder ) _UpperCAmelCase = len(__UpperCamelCase ) for i, token in enumerate(__UpperCamelCase ): _UpperCAmelCase = self.vocab_size + i - n _UpperCAmelCase = {v: k for k, v in self.special_tokens_encoder.items()} @property def UpperCAmelCase__ ( self : Dict ): return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(__UpperCamelCase )) + [1] return ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1] def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] ): if len(__UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated''' " eos tokens being added." 
) return token_ids else: return token_ids + [self.eos_token_id] def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ): _UpperCAmelCase = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ): _UpperCAmelCase = self._add_eos_if_not_present(__UpperCamelCase ) if token_ids_a is None: return token_ids_a else: _UpperCAmelCase = self._add_eos_if_not_present(__UpperCamelCase ) return token_ids_a + token_ids_a def UpperCAmelCase__ ( self : str , __UpperCamelCase : str ): _UpperCAmelCase = [chr(__UpperCamelCase ) for i in text.encode("utf-8" )] return tokens def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : Dict ): if token in self.special_tokens_encoder: _UpperCAmelCase = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: _UpperCAmelCase = self.added_tokens_encoder[token] elif len(__UpperCamelCase ) != 1: _UpperCAmelCase = self.unk_token_id else: _UpperCAmelCase = ord(__UpperCamelCase ) + self._num_special_tokens return token_id def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Union[str, Any] ): if index in self.special_tokens_decoder: _UpperCAmelCase = self.special_tokens_decoder[index] else: _UpperCAmelCase = chr(index - self._num_special_tokens ) return token def UpperCAmelCase__ ( self : Any , __UpperCamelCase : Tuple ): _UpperCAmelCase = B"" for token in tokens: if token in self.special_tokens_decoder: _UpperCAmelCase = self.special_tokens_decoder[token].encode("utf-8" ) elif token in self.added_tokens_decoder: _UpperCAmelCase = self.special_tokens_decoder[token].encode("utf-8" ) elif token in self.special_tokens_encoder: _UpperCAmelCase = token.encode("utf-8" ) elif token in self.added_tokens_encoder: _UpperCAmelCase = token.encode("utf-8" ) else: _UpperCAmelCase = bytes([ord(__UpperCamelCase )] ) bstring += tok_string _UpperCAmelCase = bstring.decode("utf-8" , errors="ignore" ) return string def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ): return ()
684
import argparse import math import traceback import dateutil.parser as date_parser import requests def __lowerCamelCase ( _lowerCAmelCase ) -> Any: _UpperCAmelCase = {} _UpperCAmelCase = job["started_at"] _UpperCAmelCase = job["completed_at"] _UpperCAmelCase = date_parser.parse(_lowerCAmelCase ) _UpperCAmelCase = date_parser.parse(_lowerCAmelCase ) _UpperCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 ) _UpperCAmelCase = start _UpperCAmelCase = end _UpperCAmelCase = duration_in_min return job_info def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=None ) -> str: _UpperCAmelCase = None if token is not None: _UpperCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''} _UpperCAmelCase = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' _UpperCAmelCase = requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).json() _UpperCAmelCase = {} try: job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} ) _UpperCAmelCase = math.ceil((result["total_count"] - 100) / 100 ) for i in range(_lowerCAmelCase ): _UpperCAmelCase = requests.get(url + F'''&page={i + 2}''' , headers=_lowerCAmelCase ).json() job_time.update({job["name"]: extract_time_from_single_job(_lowerCAmelCase ) for job in result["jobs"]} ) return job_time except Exception: print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = get_job_time(args.workflow_run_id) __lowerCAmelCase = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F'''{k}: {v["duration"]}''')
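Worth noting in the script above: the GitHub jobs endpoint returns at most 100 items per request, so after the first page the code issues math.ceil((total_count - 100) / 100) follow-up requests at &page={i + 2}. A quick check of that arithmetic:

import math
assert math.ceil((250 - 100) / 100) == 2  # pages 2 and 3 cover jobs 101-250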
684
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json", "YituTech/conv-bert-medium-small": ( "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json" ), "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json", # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : Union[str, Any] = """convbert""" def __init__( self : List[Any] , __UpperCamelCase : str=30_522 , __UpperCamelCase : Any=768 , __UpperCamelCase : Optional[int]=12 , __UpperCamelCase : Optional[int]=12 , __UpperCamelCase : Tuple=3_072 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Any=512 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : List[Any]=0.02 , __UpperCamelCase : int=1e-1_2 , __UpperCamelCase : List[Any]=1 , __UpperCamelCase : Optional[Any]=0 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : Union[str, Any]=768 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : Tuple=9 , __UpperCamelCase : List[Any]=1 , __UpperCamelCase : Union[str, Any]=None , **__UpperCamelCase : Optional[int] , ): super().__init__( pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase , ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = embedding_size _UpperCAmelCase = head_ratio _UpperCAmelCase = conv_kernel_size _UpperCAmelCase = num_groups _UpperCAmelCase = classifier_dropout class __SCREAMING_SNAKE_CASE ( lowercase): @property def UpperCAmelCase__ ( self : Union[str, Any] ): if self.task == "multiple-choice": _UpperCAmelCase = {0: "batch", 1: "choice", 2: "sequence"} else: _UpperCAmelCase = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
684
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel __lowerCAmelCase = { "gwf-440k": { "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 6_5_5_3_6, }, "jmann-small-190k": { "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 6_5_5_3_6, }, "jmann-large-580k": { "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 1_3_1_0_7_2, }, "maestro-uncond-150k": { "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, "unlocked-uncond-250k": { "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, "honk-140k": { "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, } def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: return torch.atana(_lowerCAmelCase , _lowerCAmelCase ) / math.pi * 2 def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]: _UpperCAmelCase = torch.sin(t * math.pi / 2 ) ** 2 _UpperCAmelCase = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(_lowerCAmelCase , _lowerCAmelCase ) class __SCREAMING_SNAKE_CASE ( lowercase): pass class __SCREAMING_SNAKE_CASE ( nn.Module): def __init__( self : str , __UpperCamelCase : Optional[int] ): super().__init__() _UpperCAmelCase = DiffusionAttnUnetaD(__UpperCamelCase , n_attn_layers=4 ) _UpperCAmelCase = deepcopy(self.diffusion ) _UpperCAmelCase = torch.quasirandom.SobolEngine(1 , scramble=__UpperCamelCase ) def __lowerCamelCase ( _lowerCAmelCase ) -> int: _UpperCAmelCase = MODELS_MAP[model_name]["url"] os.system(F'''wget {url} ./''' ) return F'''./{model_name}.ckpt''' __lowerCAmelCase = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", } __lowerCAmelCase = { "8": "resnets.0", "9": "attentions.0", "10": "resnets.1", "11": "attentions.1", "12": "resnets.2", "13": "attentions.2", } __lowerCAmelCase = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", "8": "resnets.3", "9": "attentions.3", "10": "resnets.4", "11": "attentions.4", "12": "resnets.5", "13": "attentions.5", } __lowerCAmelCase = { "0": "resnets.0", "1": "resnets.1", "2": "resnets.2", "4": "resnets.0", "5": "resnets.1", "6": "resnets.2", } __lowerCAmelCase = { "skip": "conv_skip", "main.0": "conv_1", "main.1": "group_norm_1", "main.3": "conv_2", "main.4": "group_norm_2", } __lowerCAmelCase = { "norm": "group_norm", "qkv_proj": ["query", "key", "value"], "out_proj": ["proj_attn"], } def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]: if name.startswith("skip" ): return name.replace("skip" , RES_CONV_MAP["skip"] ) # name has to be of format main.{digit} if not name.startswith("main." 
): raise ValueError(F'''ResConvBlock error with {name}''' ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[Any]: for key, value in ATTN_MAP.items(): if name.startswith(_lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ): return name.replace(_lowerCAmelCase , _lowerCAmelCase ) elif name.startswith(_lowerCAmelCase ): return [name.replace(_lowerCAmelCase , _lowerCAmelCase ) for v in value] raise ValueError(F'''Attn error with {name}''' ) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=13 ) -> List[Any]: _UpperCAmelCase = input_string if string.split("." )[0] == "timestep_embed": return string.replace("timestep_embed" , "time_proj" ) _UpperCAmelCase = 0 if string.startswith("net.3." ): depth += 1 _UpperCAmelCase = string[6:] elif string.startswith("net." ): _UpperCAmelCase = string[4:] while string.startswith("main.7." ): depth += 1 _UpperCAmelCase = string[7:] if string.startswith("main." ): _UpperCAmelCase = string[5:] # mid block if string[:2].isdigit(): _UpperCAmelCase = string[:2] _UpperCAmelCase = string[2:] else: _UpperCAmelCase = string[0] _UpperCAmelCase = string[1:] if depth == max_depth: _UpperCAmelCase = MID_NUM_TO_LAYER[layer_num] _UpperCAmelCase = "mid_block" elif depth > 0 and int(_lowerCAmelCase ) < 7: _UpperCAmelCase = DOWN_NUM_TO_LAYER[layer_num] _UpperCAmelCase = F'''down_blocks.{depth}''' elif depth > 0 and int(_lowerCAmelCase ) > 7: _UpperCAmelCase = UP_NUM_TO_LAYER[layer_num] _UpperCAmelCase = F'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: _UpperCAmelCase = DEPTH_0_TO_LAYER[layer_num] _UpperCAmelCase = F'''up_blocks.{max_depth - 1}''' if int(_lowerCAmelCase ) > 3 else "down_blocks.0" if not string_left.startswith("." ): raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' ) _UpperCAmelCase = string_left[1:] if "resnets" in new_layer: _UpperCAmelCase = convert_resconv_naming(_lowerCAmelCase ) elif "attentions" in new_layer: _UpperCAmelCase = convert_attn_naming(_lowerCAmelCase ) _UpperCAmelCase = new_string_left if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): _UpperCAmelCase = prefix + "." + new_layer + "." + string_left else: _UpperCAmelCase = [prefix + "." + new_layer + "." + s for s in string_left] return new_string def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]: _UpperCAmelCase = {} for k, v in state_dict.items(): if k.endswith("kernel" ): # up- and downsample layers, don't have trainable weights continue _UpperCAmelCase = rename(_lowerCAmelCase ) # check if we need to transform from Conv => Linear for attention if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _UpperCAmelCase = transform_conv_attns(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: _UpperCAmelCase = v return new_state_dict def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: if len(_lowerCAmelCase ) == 1: if len(v.shape ) == 3: # weight _UpperCAmelCase = v[:, :, 0] else: # bias _UpperCAmelCase = v else: # qkv matrices _UpperCAmelCase = v.shape[0] _UpperCAmelCase = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: _UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0] else: _UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple: _UpperCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) _UpperCAmelCase = args.model_path.split("/" )[-1].split("." 
)[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' _UpperCAmelCase = download(_lowerCAmelCase ) _UpperCAmelCase = MODELS_MAP[model_name]["sample_rate"] _UpperCAmelCase = MODELS_MAP[model_name]["sample_size"] _UpperCAmelCase = Object() _UpperCAmelCase = sample_size _UpperCAmelCase = sample_rate _UpperCAmelCase = 0 _UpperCAmelCase = UNetaDModel(sample_size=_lowerCAmelCase , sample_rate=_lowerCAmelCase ) _UpperCAmelCase = diffusers_model.state_dict() _UpperCAmelCase = DiffusionUncond(_lowerCAmelCase ) orig_model.load_state_dict(torch.load(args.model_path , map_location=_lowerCAmelCase )["state_dict"] ) _UpperCAmelCase = orig_model.diffusion_ema.eval() _UpperCAmelCase = orig_model.state_dict() _UpperCAmelCase = rename_orig_weights(_lowerCAmelCase ) _UpperCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) _UpperCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(_lowerCAmelCase ) == 0, F'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("kernel" ) for k in list(_lowerCAmelCase ) ), F'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}''' if key == "time_proj.weight": _UpperCAmelCase = value.squeeze() _UpperCAmelCase = value diffusers_model.load_state_dict(_lowerCAmelCase ) _UpperCAmelCase = 100 _UpperCAmelCase = 33 _UpperCAmelCase = IPNDMScheduler(num_train_timesteps=_lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(_lowerCAmelCase ) _UpperCAmelCase = torch.randn([1, 2, config.sample_size] , generator=_lowerCAmelCase ).to(_lowerCAmelCase ) _UpperCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=_lowerCAmelCase )[:-1] _UpperCAmelCase = get_crash_schedule(_lowerCAmelCase ) _UpperCAmelCase = DanceDiffusionPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(33 ) _UpperCAmelCase = pipe(num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase ).audios _UpperCAmelCase = sampling.iplms_sample(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {} ) _UpperCAmelCase = generated.clamp(-1 , 1 ) _UpperCAmelCase = (generated - audio).abs().sum() _UpperCAmelCase = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("Diff sum" , _lowerCAmelCase ) print("Diff max" , _lowerCAmelCase ) assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/''' print(F'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") __lowerCAmelCase = parser.parse_args() main(args)
684
1
alphabet_size = 256  # base of the polynomial hash (one byte per character)
modulus = 1000003  # modulus to hash a string


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # calculate the hash of the pattern and of the first window of the text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # slide the window by one character: https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_match = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_no_match = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_match) and not rabin_karp(pattern, text_no_match)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
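The rolling-hash update is the whole point of Rabin-Karp: the window hash is advanced in O(1) by removing the leading character's contribution and appending the trailing one, giving O(p_len + t_len) average time. A small sanity sketch of that update (same constants as above):

old = 0
for ch in "abc":
    old = (ord(ch) + old * alphabet_size) % modulus
# drop 'a', append 'd' -> should equal the hash of "bcd"
rolled = ((old - ord("a") * pow(alphabet_size, 2, modulus)) * alphabet_size + ord("d")) % modulus
direct = 0
for ch in "bcd":
    direct = (ord(ch) + direct * alphabet_size) % modulus
assert rolled == direct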
684
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 __lowerCAmelCase = get_tests_dir("fixtures") class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCAmelCase__ ( self : Dict ): # A mock response for an HTTP head request to emulate server down _UpperCAmelCase = mock.Mock() _UpperCAmelCase = 500 _UpperCAmelCase = {} _UpperCAmelCase = HTTPError _UpperCAmelCase = {} # Download this model to make sure it's in the cache. _UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head: _UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" ) # This check we did call the fake head request mock_head.assert_called() def UpperCAmelCase__ ( self : List[Any] ): # This test is for deprecated behavior and can be removed in v5 _UpperCAmelCase = ViTImageProcessor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" ) def UpperCAmelCase__ ( self : Dict ): with self.assertRaises(__UpperCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder _UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" ) _UpperCAmelCase = AutoImageProcessor.from_pretrained( "hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" ) self.assertIsNotNone(__UpperCamelCase ) @is_staging_test class __SCREAMING_SNAKE_CASE ( unittest.TestCase): @classmethod def UpperCAmelCase__ ( cls : str ): _UpperCAmelCase = TOKEN HfFolder.save_token(__UpperCamelCase ) @classmethod def UpperCAmelCase__ ( cls : Optional[Any] ): try: delete_repo(token=cls._token , repo_id="test-image-processor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" ) except HTTPError: pass def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase ) image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="test-image-processor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __UpperCamelCase , repo_id="test-image-processor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = 
ViTImageProcessor.from_pretrained(__UpperCamelCase ) image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-image-processor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) def UpperCAmelCase__ ( self : int ): CustomImageProcessor.register_for_auto_class() _UpperCAmelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase ) image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , ) _UpperCAmelCase = AutoImageProcessor.from_pretrained( F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__UpperCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
684
1
from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand __lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name def __lowerCamelCase ( _lowerCAmelCase ) -> Dict: if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(_lowerCAmelCase ): return ext raise Exception( F'''Unable to determine file format from file extension {path}. ''' F'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' ) def __lowerCamelCase ( _lowerCAmelCase ) -> Dict: _UpperCAmelCase = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) _UpperCAmelCase = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format _UpperCAmelCase = PipelineDataFormat.from_str( format=_lowerCAmelCase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(_lowerCAmelCase , _lowerCAmelCase ) class __SCREAMING_SNAKE_CASE ( lowercase): def __init__( self : Tuple , __UpperCamelCase : Pipeline , __UpperCamelCase : PipelineDataFormat ): _UpperCAmelCase = nlp _UpperCAmelCase = reader @staticmethod def UpperCAmelCase__ ( __UpperCamelCase : ArgumentParser ): _UpperCAmelCase = parser.add_parser("run" , help="Run a pipeline through the CLI" ) run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" ) run_parser.add_argument("--input" , type=__UpperCamelCase , help="Path to the file to use for inference" ) run_parser.add_argument("--output" , type=__UpperCamelCase , help="Path to the file that will be used post to write results." ) run_parser.add_argument("--model" , type=__UpperCamelCase , help="Name or path to the model to instantiate." ) run_parser.add_argument("--config" , type=__UpperCamelCase , help="Name or path to the model's config to instantiate." ) run_parser.add_argument( "--tokenizer" , type=__UpperCamelCase , help="Name of the tokenizer to use. (default: same as the model name)" ) run_parser.add_argument( "--column" , type=__UpperCamelCase , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , ) run_parser.add_argument( "--format" , type=__UpperCamelCase , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , ) run_parser.add_argument( "--device" , type=__UpperCamelCase , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , ) run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." ) run_parser.set_defaults(func=__UpperCamelCase ) def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase , _UpperCAmelCase = self._nlp, [] for entry in self._reader: _UpperCAmelCase = nlp(**__UpperCamelCase ) if self._reader.is_multi_columns else nlp(__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ): outputs.append(__UpperCamelCase ) else: outputs += output # Saving data if self._nlp.binary_output: _UpperCAmelCase = self._reader.save_binary(__UpperCamelCase ) logger.warning(F'''Current pipeline requires output to be in binary format, saving at {binary_path}''' ) else: self._reader.save(__UpperCamelCase )
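Given the flags registered above, an invocation would look roughly like this (a sketch inferred only from the argparse wiring in the snippet; --format defaults to inferring from the file extension, and valid task names come from get_supported_tasks()):

transformers-cli run --task text-classification --input data.csv --column text --output out.json --overwrite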
684
from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]: return getitem, k def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: return setitem, k, v def __lowerCamelCase ( _lowerCAmelCase ) -> str: return delitem, k def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Optional[int]: try: return fun(_lowerCAmelCase , *_lowerCAmelCase ), None except Exception as e: return None, e __lowerCAmelCase = ( _set("key_a", "val_a"), _set("key_b", "val_b"), ) __lowerCAmelCase = [ _set("key_a", "val_a"), _set("key_a", "val_b"), ] __lowerCAmelCase = [ _set("key_a", "val_a"), _set("key_b", "val_b"), _del("key_a"), _del("key_b"), _set("key_a", "val_a"), _del("key_a"), ] __lowerCAmelCase = [ _get("key_a"), _del("key_a"), _set("key_a", "val_a"), _del("key_a"), _del("key_a"), _get("key_a"), ] __lowerCAmelCase = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] __lowerCAmelCase = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("key_a", "val_b"), ] @pytest.mark.parametrize( "operations" , ( pytest.param(_add_items , id="add items" ), pytest.param(_overwrite_items , id="overwrite items" ), pytest.param(_delete_items , id="delete items" ), pytest.param(_access_absent_items , id="access absent items" ), pytest.param(_add_with_resize_up , id="add with resize up" ), pytest.param(_add_with_resize_down , id="add with resize down" ), ) , ) def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]: _UpperCAmelCase = HashMap(initial_block_size=4 ) _UpperCAmelCase = {} for _, (fun, *args) in enumerate(_lowerCAmelCase ): _UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) assert my_res == py_res assert str(_lowerCAmelCase ) == str(_lowerCAmelCase ) assert set(_lowerCAmelCase ) == set(_lowerCAmelCase ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) assert set(my.items() ) == set(py.items() ) def __lowerCamelCase ( ) -> List[Any]: def is_public(_lowerCAmelCase ) -> bool: return not name.startswith("_" ) _UpperCAmelCase = {name for name in dir({} ) if is_public(_lowerCAmelCase )} _UpperCAmelCase = {name for name in dir(HashMap() ) if is_public(_lowerCAmelCase )} assert dict_public_names > hash_public_names
684
1
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE : def __init__( self : int , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any]=13 , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : List[Any]=True , __UpperCamelCase : List[Any]=False , __UpperCamelCase : Dict=True , __UpperCamelCase : Optional[int]=99 , __UpperCamelCase : int=32 , __UpperCamelCase : Optional[Any]=5 , __UpperCamelCase : Any=4 , __UpperCamelCase : Optional[Any]=37 , __UpperCamelCase : List[Any]="gelu" , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : int=512 , __UpperCamelCase : Optional[int]=16 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : Union[str, Any]=0.02 , __UpperCamelCase : Optional[Any]=3 , __UpperCamelCase : Union[str, Any]=4 , __UpperCamelCase : Union[str, Any]=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : int ): return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , 
type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Any ): _UpperCAmelCase = BioGptModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase ) _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[str] , ): _UpperCAmelCase = BioGptForCausalLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : int , *__UpperCamelCase : str ): _UpperCAmelCase = BioGptModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() # create attention mask _UpperCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=__UpperCamelCase ) _UpperCAmelCase = self.seq_length // 2 _UpperCAmelCase = 0 # first forward pass _UpperCAmelCase , _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase ).to_tuple() # create hypothetical next token and extent to next_input_ids _UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids _UpperCAmelCase = ids_tensor((1,) , __UpperCamelCase ).item() + 1 _UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) _UpperCAmelCase = random_other_next_tokens # append to next input_ids and attn_mask _UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) _UpperCAmelCase = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__UpperCamelCase )] , dim=1 , ) # get two different outputs _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase )["last_hidden_state"] _UpperCAmelCase = model(__UpperCamelCase , past_key_values=__UpperCamelCase , attention_mask=__UpperCamelCase )["last_hidden_state"] # select random slice _UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() _UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach() _UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : str , *__UpperCamelCase : Tuple ): _UpperCAmelCase = BioGptModel(config=__UpperCamelCase ).to(__UpperCamelCase ).eval() _UpperCAmelCase = torch.ones(input_ids.shape , 
dtype=torch.long , device=__UpperCamelCase ) # first forward pass _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase ) _UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids _UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) _UpperCAmelCase = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and _UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) _UpperCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase )["last_hidden_state"] _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[ "last_hidden_state" ] # select random slice _UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() _UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() _UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , *__UpperCamelCase : Any , __UpperCamelCase : Tuple=False ): _UpperCAmelCase = BioGptForCausalLM(__UpperCamelCase ) model.to(__UpperCamelCase ) if gradient_checkpointing: model.gradient_checkpointing_enable() _UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Any , *__UpperCamelCase : Optional[Any] ): _UpperCAmelCase = BioGptModel(__UpperCamelCase ) _UpperCAmelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , *__UpperCamelCase : Tuple ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = BioGptForTokenClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : List[Any] = ( (BioGptModel, BioGptForCausalLM, 
BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : Optional[Any] = (BioGptForCausalLM,) if is_torch_available() else () __SCREAMING_SNAKE_CASE : Optional[int] = ( { """feature-extraction""": BioGptModel, """text-classification""": BioGptForSequenceClassification, """text-generation""": BioGptForCausalLM, """token-classification""": BioGptForTokenClassification, """zero-shot""": BioGptForSequenceClassification, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : Any = False def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = BioGptModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 ) def UpperCAmelCase__ ( self : List[str] ): self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*__UpperCamelCase , gradient_checkpointing=__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__UpperCamelCase ) def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*__UpperCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*__UpperCamelCase ) @slow def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(__UpperCamelCase ) _UpperCAmelCase = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) _UpperCAmelCase = "left" # Define PAD Token = EOS Token = 50256 _UpperCAmelCase = tokenizer.eos_token _UpperCAmelCase = model.config.eos_token_id # use different length sentences to test batching _UpperCAmelCase = [ "Hello, my dog is a little", "Today, I", ] _UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="pt" , padding=__UpperCamelCase ) _UpperCAmelCase = inputs["input_ids"].to(__UpperCamelCase ) _UpperCAmelCase = model.generate( input_ids=__UpperCamelCase , attention_mask=inputs["attention_mask"].to(__UpperCamelCase ) , ) _UpperCAmelCase = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(__UpperCamelCase ) _UpperCAmelCase = model.generate(input_ids=__UpperCamelCase ) _UpperCAmelCase = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() _UpperCAmelCase = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(__UpperCamelCase ) _UpperCAmelCase = model.generate(input_ids=__UpperCamelCase , max_length=model.config.max_length - num_paddings ) _UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase , 
skip_special_tokens=__UpperCamelCase ) _UpperCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCamelCase ) _UpperCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCamelCase ) _UpperCAmelCase = [ "Hello, my dog is a little bit bigger than a little bit.", "Today, I have a good idea of how to use the information", ] self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) self.assertListEqual(__UpperCamelCase , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase__ ( self : List[Any] ): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = BioGptModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = input_dict["input_ids"] _UpperCAmelCase = input_ids.ne(1 ).to(__UpperCamelCase ) _UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _UpperCAmelCase = BioGptForSequenceClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = "multi_label_classification" _UpperCAmelCase = input_dict["input_ids"] _UpperCAmelCase = input_ids.ne(1 ).to(__UpperCamelCase ) _UpperCAmelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) _UpperCAmelCase = BioGptForSequenceClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase): @slow def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) _UpperCAmelCase = torch.tensor([[2, 4_805, 9, 656, 21]] ) _UpperCAmelCase = model(__UpperCamelCase )[0] _UpperCAmelCase = 42_384 _UpperCAmelCase = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , __UpperCamelCase ) _UpperCAmelCase = torch.tensor( [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 ) ) @slow def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) _UpperCAmelCase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(__UpperCamelCase ) torch.manual_seed(0 ) _UpperCAmelCase = tokenizer("COVID-19 is" , return_tensors="pt" ).to(__UpperCamelCase ) _UpperCAmelCase = model.generate( **__UpperCamelCase , min_length=100 , max_length=1_024 , num_beams=5 , early_stopping=__UpperCamelCase , ) _UpperCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCamelCase ) _UpperCAmelCase = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has 
spread to more than 200 countries and" " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(__UpperCamelCase , __UpperCamelCase )
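# For reference, a minimal sketch of the generation path the slow tests above
# exercise. Illustrative only, not part of the test suite; assumes network
# access to the `microsoft/biogpt` checkpoint.
import torch
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
model.eval()

inputs = tokenizer("COVID-19 is", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_length=40, num_beams=5, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))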
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search the sorted prefix for the insertion point of `val`.
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift larger elements one slot to the right, then insert `val`.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
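# A quick sanity check for the sort above (hypothetical snippet, not part of
# the original file): the result should agree with the built-in `sorted`.
import random

data = random.sample(range(100), 20)
assert binary_insertion_sort(list(data)) == sorted(data)
print(binary_insertion_sort([5, 2, 9, 1]))  # [1, 2, 5, 9]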
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # Clearing the lowest set bit each pass (Kernighan's trick) means the
        # loop runs once per `1` bit instead of once per bit position, so it
        # won't run 32 times; it only runs as many times as there are `1`s.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
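# Illustrative cross-check (not from the original file): Kernighan's count
# should match the number of '1' characters in the binary representation.
for n in (0, 1, 0b101101, 2**31 - 1):
    assert get_set_bits_count(n) == bin(n).count("1")
print(get_set_bits_count(0b101101))  # 4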
alphabet_size = 256
modulus = 1_000_003  # Modulus to hash a string


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_match = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_no_match = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_match) and not rabin_karp(pattern, text_no_match)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
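# Illustrative check of the rolling-hash recurrence used above (assumes the
# module-level `alphabet_size` and `modulus`): sliding the window by one
# character must give the same value as rehashing the new window from scratch.
def naive_hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h


text = "abcdef"
window = 3
h = naive_hash(text[:window])
power = pow(alphabet_size, window - 1, modulus)
for i in range(len(text) - window):
    h = ((h - ord(text[i]) * power) * alphabet_size + ord(text[i + window])) % modulus
    assert h == naive_hash(text[i + 1 : i + 1 + window])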
import json import os import shutil import warnings from argparse import ArgumentParser, Namespace from pathlib import Path from typing import List from ..utils import logging from . import BaseTransformersCLICommand try: from cookiecutter.main import cookiecutter __lowerCAmelCase = True except ImportError: __lowerCAmelCase = False __lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple: return AddNewModelCommand(args.testing , args.testing_file , path=args.path ) class __SCREAMING_SNAKE_CASE ( lowercase): @staticmethod def UpperCAmelCase__ ( __UpperCamelCase : ArgumentParser ): _UpperCAmelCase = parser.add_parser("add-new-model" ) add_new_model_parser.add_argument("--testing" , action="store_true" , help="If in testing mode." ) add_new_model_parser.add_argument("--testing_file" , type=__UpperCamelCase , help="Configuration file on which to run." ) add_new_model_parser.add_argument( "--path" , type=__UpperCamelCase , help="Path to cookiecutter. Should only be used for testing purposes." ) add_new_model_parser.set_defaults(func=__UpperCamelCase ) def __init__( self : Optional[Any] , __UpperCamelCase : bool , __UpperCamelCase : str , __UpperCamelCase : Optional[int]=None , *__UpperCamelCase : Optional[Any] ): _UpperCAmelCase = testing _UpperCAmelCase = testing_file _UpperCAmelCase = path def UpperCAmelCase__ ( self : List[str] ): warnings.warn( "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. " "It is not actively maintained anymore, so might give a result that won't pass all tests and quality " "checks, you should use `transformers-cli add-new-model-like` instead." ) if not _has_cookiecutter: raise ImportError( "Model creation dependencies are required to use the `add_new_model` command. Install them by running " "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n" ) # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory _UpperCAmelCase = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]] if len(__UpperCamelCase ) > 0: raise ValueError( "Several directories starting with `cookiecutter-template-` in current working directory. " "Please clean your directory by removing all folders starting with `cookiecutter-template-` or " "change your working directory." 
) _UpperCAmelCase = ( Path(__UpperCamelCase ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent ) _UpperCAmelCase = path_to_transformer_root / "templates" / "adding_a_new_model" # Execute cookiecutter if not self._testing: cookiecutter(str(__UpperCamelCase ) ) else: with open(self._testing_file , "r" ) as configuration_file: _UpperCAmelCase = json.load(__UpperCamelCase ) cookiecutter( str(path_to_cookiecutter if self._path is None else self._path ) , no_input=__UpperCamelCase , extra_context=__UpperCamelCase , ) _UpperCAmelCase = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0] # Retrieve configuration with open(directory + "/configuration.json" , "r" ) as configuration_file: _UpperCAmelCase = json.load(__UpperCamelCase ) _UpperCAmelCase = configuration["lowercase_modelname"] _UpperCAmelCase = configuration["generate_tensorflow_pytorch_and_flax"] os.remove(F'''{directory}/configuration.json''' ) _UpperCAmelCase = "PyTorch" in generate_tensorflow_pytorch_and_flax _UpperCAmelCase = "TensorFlow" in generate_tensorflow_pytorch_and_flax _UpperCAmelCase = "Flax" in generate_tensorflow_pytorch_and_flax _UpperCAmelCase = F'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}''' os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) os.makedirs(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=__UpperCamelCase ) # Tests require submodules as they have parent imports with open(F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , "w" ): pass shutil.move( F'''{directory}/__init__.py''' , F'''{model_dir}/__init__.py''' , ) shutil.move( F'''{directory}/configuration_{lowercase_model_name}.py''' , F'''{model_dir}/configuration_{lowercase_model_name}.py''' , ) def remove_copy_lines(__UpperCamelCase : List[str] ): with open(__UpperCamelCase , "r" ) as f: _UpperCAmelCase = f.readlines() with open(__UpperCamelCase , "w" ) as f: for line in lines: if "# Copied from transformers." 
not in line: f.write(__UpperCamelCase ) if output_pytorch: if not self._testing: remove_copy_lines(F'''{directory}/modeling_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_{lowercase_model_name}.py''' ) if output_tensorflow: if not self._testing: remove_copy_lines(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_tf_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_tf_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' ) if output_flax: if not self._testing: remove_copy_lines(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/modeling_flax_{lowercase_model_name}.py''' , F'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , F'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , ) else: os.remove(F'''{directory}/modeling_flax_{lowercase_model_name}.py''' ) os.remove(F'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' ) shutil.move( F'''{directory}/{lowercase_model_name}.md''' , F'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , ) shutil.move( F'''{directory}/tokenization_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}.py''' , ) shutil.move( F'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , F'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , ) from os import fdopen, remove from shutil import copymode, move from tempfile import mkstemp def replace(__UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : List[str] ): # Create temp file _UpperCAmelCase , _UpperCAmelCase = mkstemp() _UpperCAmelCase = False with fdopen(__UpperCamelCase , "w" ) as new_file: with open(__UpperCamelCase ) as old_file: for line in old_file: new_file.write(__UpperCamelCase ) if line_to_copy_below in line: _UpperCAmelCase = True for line_to_copy in lines_to_copy: new_file.write(__UpperCamelCase ) if not line_found: raise ValueError(F'''Line {line_to_copy_below} was not found in file.''' ) # Copy the file permissions from the old file to the new file copymode(__UpperCamelCase , __UpperCamelCase ) # Remove original file remove(__UpperCamelCase ) # Move new file move(__UpperCamelCase , __UpperCamelCase ) def skip_units(__UpperCamelCase : Dict ): return ( ("generating PyTorch" in line and not output_pytorch) or ("generating TensorFlow" in line and not output_tensorflow) or ("generating Flax" in line and not output_flax) ) def replace_in_files(__UpperCamelCase : Union[str, Any] ): with open(__UpperCamelCase ) as datafile: _UpperCAmelCase = [] _UpperCAmelCase = False _UpperCAmelCase = False for line in datafile: if "# To replace in: " in line and "##" 
not in line: _UpperCAmelCase = line.split("\"" )[1] _UpperCAmelCase = skip_units(__UpperCamelCase ) elif "# Below: " in line and "##" not in line: _UpperCAmelCase = line.split("\"" )[1] _UpperCAmelCase = skip_units(__UpperCamelCase ) elif "# End." in line and "##" not in line: if not skip_file and not skip_snippet: replace(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = [] elif "# Replace with" in line and "##" not in line: _UpperCAmelCase = [] elif "##" not in line: lines_to_copy.append(__UpperCamelCase ) remove(__UpperCamelCase ) replace_in_files(F'''{directory}/to_replace_{lowercase_model_name}.py''' ) os.rmdir(__UpperCamelCase )
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch __lowerCAmelCase = random.Random() def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]: if rng is None: _UpperCAmelCase = global_rng _UpperCAmelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def __init__( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Union[str, Any]=400 , __UpperCamelCase : List[Any]=2_000 , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : Optional[int]=160 , __UpperCamelCase : Any=8 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Dict=4_000 , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Tuple=True , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = min_seq_length _UpperCAmelCase = max_seq_length _UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _UpperCAmelCase = padding_value _UpperCAmelCase = sampling_rate _UpperCAmelCase = return_attention_mask _UpperCAmelCase = do_normalize _UpperCAmelCase = feature_size _UpperCAmelCase = chunk_length _UpperCAmelCase = hop_length def UpperCAmelCase__ ( self : Optional[Any] ): return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Tuple=False , __UpperCamelCase : Dict=False ): def _flatten(__UpperCamelCase : Any ): return list(itertools.chain(*__UpperCamelCase ) ) if equal_length: _UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _UpperCAmelCase = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : str = WhisperFeatureExtractor if is_speech_available() else None def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = WhisperFeatureExtractionTester(self ) def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = feat_extract_first.save_pretrained(__UpperCamelCase )[0] check_json_file_has_correct_format(__UpperCamelCase ) _UpperCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCamelCase ) _UpperCAmelCase = feat_extract_first.to_dict() _UpperCAmelCase = feat_extract_second.to_dict() _UpperCAmelCase = 
feat_extract_first.mel_filters _UpperCAmelCase = feat_extract_second.mel_filters self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(__UpperCamelCase , "feat_extract.json" ) feat_extract_first.to_json_file(__UpperCamelCase ) _UpperCAmelCase = self.feature_extraction_class.from_json_file(__UpperCamelCase ) _UpperCAmelCase = feat_extract_first.to_dict() _UpperCAmelCase = feat_extract_second.to_dict() _UpperCAmelCase = feat_extract_first.mel_filters _UpperCAmelCase = feat_extract_second.mel_filters self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : int ): # Tests that all call wrap to encode_plus and batch_encode_plus _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs] # Test feature size _UpperCAmelCase = feature_extractor(__UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input _UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) # Test batched _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)] _UpperCAmelCase = np.asarray(__UpperCamelCase ) _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) # Test truncation required _UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs] _UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs] _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs_truncated] _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) def UpperCAmelCase__ ( self : Union[str, Any] ): import torch _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa ) _UpperCAmelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) _UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Tuple ): _UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech _UpperCAmelCase = ds.sort("id" ).select(range(__UpperCamelCase ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def UpperCAmelCase__ ( self : Tuple ): # fmt: off _UpperCAmelCase = torch.tensor( [ 0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951, 0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678, 0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554, -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854 ] ) # fmt: on _UpperCAmelCase = self._load_datasamples(1 ) _UpperCAmelCase = WhisperFeatureExtractor() _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_features self.assertEqual(input_features.shape , (1, 80, 3_000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , __UpperCamelCase , atol=1e-4 ) ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase = self._load_datasamples(1 )[0] _UpperCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue _UpperCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__UpperCamelCase )[0] self.assertTrue(np.all(np.mean(__UpperCamelCase ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase ) - 1 ) < 1e-3 ) )
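# Minimal sketch of the feature-extraction call the tests above cover
# (illustrative; the random waveform stands in for real 16 kHz mono audio).
import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()  # defaults: 80 mel bins, 30 s window
waveform = np.random.randn(16_000).astype(np.float32)  # 1 s of dummy audio
features = feature_extractor(waveform, sampling_rate=16_000, return_tensors="np").input_features
print(features.shape)  # (1, 80, 3000): frames are padded out to the 30 s window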
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : str=13 , __UpperCamelCase : Tuple=32 , __UpperCamelCase : int=2 , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : Optional[int]=16 , __UpperCamelCase : str=[1, 2, 1] , __UpperCamelCase : List[Any]=[2, 2, 4] , __UpperCamelCase : int=2 , __UpperCamelCase : Optional[int]=2.0 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Dict=0.02 , __UpperCamelCase : Optional[Any]=1e-5 , __UpperCamelCase : Dict=True , __UpperCamelCase : Any=None , __UpperCamelCase : List[str]=True , __UpperCamelCase : str=10 , __UpperCamelCase : int=8 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = embed_dim _UpperCAmelCase = depths _UpperCAmelCase = num_heads _UpperCAmelCase = window_size _UpperCAmelCase = mlp_ratio _UpperCAmelCase = qkv_bias _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_absolute_embeddings _UpperCAmelCase = patch_norm _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = initializer_range _UpperCAmelCase = is_training _UpperCAmelCase = scope _UpperCAmelCase = use_labels _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = encoder_stride def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self : Dict ): return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCAmelCase__ ( self : str , __UpperCamelCase : Optional[Any] , 
__UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] ): _UpperCAmelCase = SwinvaModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ): _UpperCAmelCase = SwinvaForMaskedImageModeling(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _UpperCAmelCase = 1 _UpperCAmelCase = SwinvaForMaskedImageModeling(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any ): _UpperCAmelCase = self.type_sequence_label_size _UpperCAmelCase = SwinvaForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs _UpperCAmelCase = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : Union[str, Any] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : str = ( {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification} if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : str = False __SCREAMING_SNAKE_CASE : List[Any] = False __SCREAMING_SNAKE_CASE : int = False __SCREAMING_SNAKE_CASE : Optional[int] = False def UpperCAmelCase__ ( self : int ): _UpperCAmelCase = SwinvaModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , embed_dim=37 ) def UpperCAmelCase__ ( self : Dict ): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." 
) def UpperCAmelCase__ ( self : int ): pass @unittest.skip(reason="Swinv2 does not use inputs_embeds" ) def UpperCAmelCase__ ( self : str ): pass def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _UpperCAmelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) ) def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(__UpperCamelCase ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , __UpperCamelCase ) def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = True for model_class in self.all_model_classes: _UpperCAmelCase = True _UpperCAmelCase = False _UpperCAmelCase = True _UpperCAmelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) _UpperCAmelCase = outputs.attentions _UpperCAmelCase = len(self.model_tester.depths ) self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _UpperCAmelCase = True _UpperCAmelCase = config.window_size**2 _UpperCAmelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) _UpperCAmelCase = outputs.attentions self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) _UpperCAmelCase = len(__UpperCamelCase ) # Check attention is always last and order is fine _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) if hasattr(self.model_tester , "num_hidden_states_types" ): _UpperCAmelCase = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states _UpperCAmelCase = 2 self.assertEqual(out_len + added_hidden_states , len(__UpperCamelCase ) ) _UpperCAmelCase = outputs.attentions self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def UpperCAmelCase__ ( self : Any , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] ): _UpperCAmelCase = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): _UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) ) _UpperCAmelCase = outputs.hidden_states _UpperCAmelCase = getattr( 
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) # Swinv2 has a different seq_length _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) _UpperCAmelCase = outputs.reshaped_hidden_states self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = reshaped_hidden_states[0].shape _UpperCAmelCase = ( reshaped_hidden_states[0].view(__UpperCamelCase , __UpperCamelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _UpperCAmelCase = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: _UpperCAmelCase = True self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase = True self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) ) def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @slow def UpperCAmelCase__ ( self : Tuple ): for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = SwinvaModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 
_config_zero_init(__UpperCamelCase ) for model_class in self.all_model_classes: _UpperCAmelCase = model_class(config=__UpperCamelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase): @cached_property def UpperCAmelCase__ ( self : Optional[Any] ): return ( AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ) if is_vision_available() else None ) @slow def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to( __UpperCamelCase ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) _UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): _UpperCAmelCase = model(**__UpperCamelCase ) # verify the logits _UpperCAmelCase = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __UpperCamelCase ) _UpperCAmelCase = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
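# The inference path the integration test above exercises, written against the
# public class names; illustrative only, and assumes network access to the
# `microsoft/swinv2-tiny-patch4-window8-256` checkpoint and the COCO fixture.
import torch
from PIL import Image
from transformers import AutoImageProcessor, Swinv2ForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])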
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re

from ..utils import cached_file


# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
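# Example call (illustrative; requires network access to the Hugging Face Hub,
# and "my-agent" is just an arbitrary user-agent tag, not a real agent name):
prompt = download_prompt(None, agent_name="my-agent", mode="run")
print(prompt[:80])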
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
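# Illustrative usage (note: scraping is brittle; IMDb's markup may have changed
# since this scraper was written, in which case the selectors need updating).
movies = get_imdb_top_250_movies()
for title, rating in list(movies.items())[:5]:
    print(f"{rating:.1f}  {title}")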
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    # Project Euler 43: check the 3-digit windows d2..d4 (divisible by 2 via
    # d4), d3..d5 (by 3), d4..d6 (by 5 via d6), then d5..d7 through d8..d10
    # (by 7, 11, 13, 17).
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
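# Spot check (illustrative): 1406357289 is the example given in Project Euler
# problem 43 and satisfies the substring-divisibility property.
digits = tuple(int(d) for d in "1406357289")
assert is_substring_divisible(digits)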
import argparse import collections import json import os import re import string import sys import numpy as np __lowerCAmelCase = re.compile(r"\b(a|an|the)\b", re.UNICODE) __lowerCAmelCase = None def __lowerCamelCase ( ) -> int: _UpperCAmelCase = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." ) parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." ) parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." ) parser.add_argument( "--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." ) parser.add_argument( "--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." ) parser.add_argument( "--na-prob-thresh" , "-t" , type=_lowerCAmelCase , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , ) parser.add_argument( "--out-image-dir" , "-p" , metavar="out_images" , default=_lowerCAmelCase , help="Save precision-recall curves to directory." ) parser.add_argument("--verbose" , "-v" , action="store_true" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def __lowerCamelCase ( _lowerCAmelCase ) -> int: _UpperCAmelCase = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: _UpperCAmelCase = bool(qa["answers"]["text"] ) return qid_to_has_ans def __lowerCamelCase ( _lowerCAmelCase ) -> int: def remove_articles(_lowerCAmelCase ): return ARTICLES_REGEX.sub(" " , _lowerCAmelCase ) def white_space_fix(_lowerCAmelCase ): return " ".join(text.split() ) def remove_punc(_lowerCAmelCase ): _UpperCAmelCase = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCAmelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) ) def __lowerCamelCase ( _lowerCAmelCase ) -> Dict: if not s: return [] return normalize_answer(_lowerCAmelCase ).split() def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> int: return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) ) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Dict: _UpperCAmelCase = get_tokens(_lowerCAmelCase ) _UpperCAmelCase = get_tokens(_lowerCAmelCase ) _UpperCAmelCase = collections.Counter(_lowerCAmelCase ) & collections.Counter(_lowerCAmelCase ) _UpperCAmelCase = sum(common.values() ) if len(_lowerCAmelCase ) == 0 or len(_lowerCAmelCase ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 _UpperCAmelCase = 1.0 * num_same / len(_lowerCAmelCase ) _UpperCAmelCase = 1.0 * num_same / len(_lowerCAmelCase ) _UpperCAmelCase = (2 * precision * recall) / (precision + recall) return fa def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> str: _UpperCAmelCase = {} _UpperCAmelCase = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: _UpperCAmelCase = qa["id"] _UpperCAmelCase = [t for t in qa["answers"]["text"] if normalize_answer(_lowerCAmelCase )] if not gold_answers: # For unanswerable questions, only correct answer is empty string _UpperCAmelCase = [""] if qid not in preds: print(F'''Missing prediction for {qid}''' ) continue _UpperCAmelCase = preds[qid] # Take max over all gold answers _UpperCAmelCase = max(compute_exact(_lowerCAmelCase , _lowerCAmelCase ) for a in gold_answers ) _UpperCAmelCase = max(compute_fa(_lowerCAmelCase , 
_lowerCAmelCase ) for a in gold_answers ) return exact_scores, fa_scores def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: _UpperCAmelCase = {} for qid, s in scores.items(): _UpperCAmelCase = na_probs[qid] > na_prob_thresh if pred_na: _UpperCAmelCase = float(not qid_to_has_ans[qid] ) else: _UpperCAmelCase = s return new_scores def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ) -> List[str]: if not qid_list: _UpperCAmelCase = len(_lowerCAmelCase ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores.values() ) / total), ("f1", 100.0 * sum(fa_scores.values() ) / total), ("total", total), ] ) else: _UpperCAmelCase = len(_lowerCAmelCase ) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ("f1", 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ("total", total), ] ) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int: for k in new_eval: _UpperCAmelCase = new_eval[k] def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: plt.step(_lowerCAmelCase , _lowerCAmelCase , color="b" , alpha=0.2 , where="post" ) plt.fill_between(_lowerCAmelCase , _lowerCAmelCase , step="post" , alpha=0.2 , color="b" ) plt.xlabel("Recall" ) plt.ylabel("Precision" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(_lowerCAmelCase ) plt.savefig(_lowerCAmelCase ) plt.clf() def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Union[str, Any]: _UpperCAmelCase = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : na_probs[k] ) _UpperCAmelCase = 0.0 _UpperCAmelCase = 1.0 _UpperCAmelCase = 0.0 _UpperCAmelCase = [1.0] _UpperCAmelCase = [0.0] _UpperCAmelCase = 0.0 for i, qid in enumerate(_lowerCAmelCase ): if qid_to_has_ans[qid]: true_pos += scores[qid] _UpperCAmelCase = true_pos / float(i + 1 ) _UpperCAmelCase = true_pos / float(_lowerCAmelCase ) if i == len(_lowerCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(_lowerCAmelCase ) recalls.append(_lowerCAmelCase ) if out_image: plot_pr_curve(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) return {"ap": 100.0 * avg_prec} def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: if out_image_dir and not os.path.exists(_lowerCAmelCase ): os.makedirs(_lowerCAmelCase ) _UpperCAmelCase = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return _UpperCAmelCase = make_precision_recall_eval( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , out_image=os.path.join(_lowerCAmelCase , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , ) _UpperCAmelCase = make_precision_recall_eval( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , out_image=os.path.join(_lowerCAmelCase , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , ) _UpperCAmelCase = {k: float(_lowerCAmelCase ) for k, v in qid_to_has_ans.items()} _UpperCAmelCase = make_precision_recall_eval( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , out_image=os.path.join(_lowerCAmelCase , "pr_oracle.png" ) , title="Oracle Precision-Recall curve 
(binary task of HasAns vs. NoAns)" , ) merge_eval(_lowerCAmelCase , _lowerCAmelCase , "pr_exact" ) merge_eval(_lowerCAmelCase , _lowerCAmelCase , "pr_f1" ) merge_eval(_lowerCAmelCase , _lowerCAmelCase , "pr_oracle" ) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str: if not qid_list: return _UpperCAmelCase = [na_probs[k] for k in qid_list] _UpperCAmelCase = np.ones_like(_lowerCAmelCase ) / float(len(_lowerCAmelCase ) ) plt.hist(_lowerCAmelCase , weights=_lowerCAmelCase , bins=20 , range=(0.0, 1.0) ) plt.xlabel("Model probability of no-answer" ) plt.ylabel("Proportion of dataset" ) plt.title(F'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(_lowerCAmelCase , F'''na_prob_hist_{name}.png''' ) ) plt.clf() def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[int]: _UpperCAmelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) _UpperCAmelCase = num_no_ans _UpperCAmelCase = cur_score _UpperCAmelCase = 0.0 _UpperCAmelCase = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : na_probs[k] ) for i, qid in enumerate(_lowerCAmelCase ): if qid not in scores: continue if qid_to_has_ans[qid]: _UpperCAmelCase = scores[qid] else: if preds[qid]: _UpperCAmelCase = -1 else: _UpperCAmelCase = 0 cur_score += diff if cur_score > best_score: _UpperCAmelCase = cur_score _UpperCAmelCase = na_probs[qid] return 100.0 * best_score / len(_lowerCAmelCase ), best_thresh def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: _UpperCAmelCase , _UpperCAmelCase = find_best_thresh(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = find_best_thresh(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) _UpperCAmelCase = best_exact _UpperCAmelCase = exact_thresh _UpperCAmelCase = best_fa _UpperCAmelCase = fa_thresh def __lowerCamelCase ( ) -> int: with open(OPTS.data_file ) as f: _UpperCAmelCase = json.load(_lowerCAmelCase ) _UpperCAmelCase = dataset_json["data"] with open(OPTS.pred_file ) as f: _UpperCAmelCase = json.load(_lowerCAmelCase ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: _UpperCAmelCase = json.load(_lowerCAmelCase ) else: _UpperCAmelCase = {k: 0.0 for k in preds} _UpperCAmelCase = make_qid_to_has_ans(_lowerCAmelCase ) # maps qid to True/False _UpperCAmelCase = [k for k, v in qid_to_has_ans.items() if v] _UpperCAmelCase = [k for k, v in qid_to_has_ans.items() if not v] _UpperCAmelCase , _UpperCAmelCase = get_raw_scores(_lowerCAmelCase , _lowerCAmelCase ) _UpperCAmelCase = apply_no_ans_threshold(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , OPTS.na_prob_thresh ) _UpperCAmelCase = apply_no_ans_threshold(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , OPTS.na_prob_thresh ) _UpperCAmelCase = make_eval_dict(_lowerCAmelCase , _lowerCAmelCase ) if has_ans_qids: _UpperCAmelCase = make_eval_dict(_lowerCAmelCase , _lowerCAmelCase , qid_list=_lowerCAmelCase ) merge_eval(_lowerCAmelCase , _lowerCAmelCase , "HasAns" ) if no_ans_qids: _UpperCAmelCase = make_eval_dict(_lowerCAmelCase , _lowerCAmelCase , qid_list=_lowerCAmelCase ) merge_eval(_lowerCAmelCase , _lowerCAmelCase , "NoAns" ) if OPTS.na_prob_file: find_all_best_thresh(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) if OPTS.na_prob_file and OPTS.out_image_dir: 
run_precision_recall_analysis(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , OPTS.out_image_dir ) histogram_na_prob(_lowerCAmelCase , _lowerCAmelCase , OPTS.out_image_dir , "hasAns" ) histogram_na_prob(_lowerCAmelCase , _lowerCAmelCase , OPTS.out_image_dir , "noAns" ) if OPTS.out_file: with open(OPTS.out_file , "w" ) as f: json.dump(_lowerCAmelCase , _lowerCAmelCase ) else: print(json.dumps(_lowerCAmelCase , indent=2 ) ) if __name__ == "__main__": __lowerCAmelCase = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("Agg") import matplotlib.pyplot as plt main()
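For orientation, the no-answer thresholding applied in `main` above (`apply_no_ans_threshold`) can be traced on toy inputs. The question IDs, scores, and threshold below are invented purely for illustration:

# Toy walk-through of apply_no_ans_threshold (illustrative values only).
scores = {"q1": 1.0, "q2": 0.0}             # per-question exact-match scores
na_probs = {"q1": 0.9, "q2": 0.1}           # predicted no-answer probabilities
qid_to_has_ans = {"q1": True, "q2": False}
na_prob_thresh = 0.5

new_scores = {}
for qid, s in scores.items():
    if na_probs[qid] > na_prob_thresh:
        # The model effectively predicts "no answer"; it is credited only
        # when the question truly has no answer.
        new_scores[qid] = float(not qid_to_has_ans[qid])
    else:
        new_scores[qid] = s

print(new_scores)  # {'q1': 0.0, 'q2': 0.0}

Here `q1` loses its point: it has an answer, but its no-answer probability exceeded the threshold.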
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __lowerCAmelCase = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __lowerCAmelCase = {"facebook/blenderbot-3B": 1_2_8} class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : str = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : List[Any] = ["""input_ids""", """attention_mask"""] __SCREAMING_SNAKE_CASE : List[str] = BlenderbotTokenizer def __init__( self : Tuple , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : Tuple="<s>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : Dict="</s>" , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : Tuple="<pad>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : List[str]=True , **__UpperCamelCase : int , ): super().__init__( __UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , ) _UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space: _UpperCAmelCase = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) ) _UpperCAmelCase = add_prefix_space _UpperCAmelCase = pre_tok_class(**__UpperCamelCase ) _UpperCAmelCase = add_prefix_space _UpperCAmelCase = "post_processor" _UpperCAmelCase = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase ) if tokenizer_component_instance: _UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _UpperCAmelCase = tuple(state["sep"] ) if "cls" in state: _UpperCAmelCase = tuple(state["cls"] ) _UpperCAmelCase = False if state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space: _UpperCAmelCase = add_prefix_space _UpperCAmelCase = True if state.get("trim_offsets" , __UpperCamelCase ) != trim_offsets: _UpperCAmelCase = trim_offsets _UpperCAmelCase = True if changes_to_apply: _UpperCAmelCase = getattr(__UpperCamelCase , state.pop("type" ) ) 
_UpperCAmelCase = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def UpperCAmelCase__ ( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] ): _UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value _UpperCAmelCase = value def UpperCAmelCase__ ( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[Any] ): _UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase ) def UpperCAmelCase__ ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ): _UpperCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase ) def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ): _UpperCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ): _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : "Conversation" ): _UpperCAmelCase = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(" " + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) _UpperCAmelCase = " ".join(__UpperCamelCase ) _UpperCAmelCase = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: _UpperCAmelCase = input_ids[-self.model_max_length :] logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, "constant": get_constant_schedule, "constant_w_warmup": get_constant_schedule_with_warmup, } class __SCREAMING_SNAKE_CASE ( lowercase): def __init__( self : str , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Union[str, Any]=None , *__UpperCamelCase : Tuple , **__UpperCamelCase : List[str] ): super().__init__(*__UpperCamelCase , **__UpperCamelCase ) if config is None: assert isinstance(self.model , __UpperCamelCase ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F''' {self.model.__class__}''' ) _UpperCAmelCase = self.model.config else: _UpperCAmelCase = config _UpperCAmelCase = data_args _UpperCAmelCase = self.config.tgt_vocab_size if isinstance(self.config , __UpperCamelCase ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for''' " padding."
) if self.args.label_smoothing == 0: _UpperCAmelCase = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss _UpperCAmelCase = label_smoothed_nll_loss def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : int ): if self.optimizer is None: _UpperCAmelCase = ["bias", "LayerNorm.weight"] _UpperCAmelCase = [ { "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], "weight_decay": self.args.weight_decay, }, { "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], "weight_decay": 0.0, }, ] _UpperCAmelCase = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: _UpperCAmelCase = Adafactor _UpperCAmelCase = {"scale_parameter": False, "relative_step": False} else: _UpperCAmelCase = AdamW _UpperCAmelCase = { "betas": (self.args.adam_betaa, self.args.adam_betaa), "eps": self.args.adam_epsilon, } _UpperCAmelCase = self.args.learning_rate if self.sharded_ddp: _UpperCAmelCase = OSS( params=__UpperCamelCase , optim=__UpperCamelCase , **__UpperCamelCase , ) else: _UpperCAmelCase = optimizer_cls(__UpperCamelCase , **__UpperCamelCase ) if self.lr_scheduler is None: _UpperCAmelCase = self._get_lr_scheduler(__UpperCamelCase ) else: # ignoring --lr_scheduler logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." ) def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : Dict ): _UpperCAmelCase = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": _UpperCAmelCase = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": _UpperCAmelCase = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: _UpperCAmelCase = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__UpperCamelCase ) return scheduler def UpperCAmelCase__ ( self : str ): if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple ): if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token _UpperCAmelCase = model(**__UpperCamelCase , use_cache=__UpperCamelCase )[0] _UpperCAmelCase = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models _UpperCAmelCase , _UpperCAmelCase = model(**__UpperCamelCase , labels=__UpperCamelCase , use_cache=__UpperCamelCase )[:2] else: # compute label smoothed loss _UpperCAmelCase = model(**__UpperCamelCase , use_cache=__UpperCamelCase )[0] _UpperCAmelCase = torch.nn.functional.log_softmax(__UpperCamelCase , dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = self.loss_fn(__UpperCamelCase , __UpperCamelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : 
List[Any] ): _UpperCAmelCase = inputs.pop("labels" ) _UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return loss def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : nn.Module , __UpperCamelCase : Dict[str, Union[torch.Tensor, Any]] , __UpperCamelCase : bool , __UpperCamelCase : Optional[List[str]] = None , ): _UpperCAmelCase = self._prepare_inputs(__UpperCamelCase ) _UpperCAmelCase = { "max_length": self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: _UpperCAmelCase = self.model.generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , **__UpperCamelCase , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: _UpperCAmelCase = self._pad_tensors_to_max_len(__UpperCamelCase , gen_kwargs["max_length"] ) _UpperCAmelCase = inputs.pop("labels" ) with torch.no_grad(): # compute loss on predict data _UpperCAmelCase , _UpperCAmelCase = self._compute_loss(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) _UpperCAmelCase = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: _UpperCAmelCase = self._pad_tensors_to_max_len(__UpperCamelCase , gen_kwargs["max_length"] ) return (loss, logits, labels) def UpperCAmelCase__ ( self : str , __UpperCamelCase : int , __UpperCamelCase : Tuple ): # If PAD token is not defined at least EOS token has to be defined _UpperCAmelCase = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be" F''' padded to `max_length`={max_length}''' ) _UpperCAmelCase = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) _UpperCAmelCase = tensor return padded_tensor
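The trainer above imports `label_smoothed_nll_loss` from a local `utils` module that is not shown. A minimal sketch of such a helper, consistent with how it is called here (log-probabilities, labels, a smoothing factor, and `ignore_index=pad_token_id`), might look as follows; the body is an assumption, not the project's actual implementation:

import torch

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None):
    # lprobs: (..., vocab) log-probabilities; target: (...,) gold label ids
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)  # uniform mass over the vocab
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss

The two return values match the trainer's `loss, nll_loss = self.loss_fn(...)` unpacking.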
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __lowerCAmelCase = logging.get_logger(__name__) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: _UpperCAmelCase = WavaVecaForSequenceClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase ) _UpperCAmelCase = downstream_dict["projector.weight"] _UpperCAmelCase = downstream_dict["projector.bias"] _UpperCAmelCase = downstream_dict["model.post_net.linear.weight"] _UpperCAmelCase = downstream_dict["model.post_net.linear.bias"] return model def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str: _UpperCAmelCase = WavaVecaForAudioFrameClassification.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase ) _UpperCAmelCase = downstream_dict["model.linear.weight"] _UpperCAmelCase = downstream_dict["model.linear.bias"] return model def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: _UpperCAmelCase = WavaVecaForXVector.from_pretrained(_lowerCAmelCase , config=_lowerCAmelCase ) _UpperCAmelCase = downstream_dict["connector.weight"] _UpperCAmelCase = downstream_dict["connector.bias"] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): _UpperCAmelCase = downstream_dict[ F'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] _UpperCAmelCase = downstream_dict[F'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] _UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"] _UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"] _UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"] _UpperCAmelCase = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"] _UpperCAmelCase = downstream_dict["objective.W"] return model @torch.no_grad() def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: _UpperCAmelCase = torch.load(_lowerCAmelCase , map_location="cpu" ) _UpperCAmelCase = checkpoint["Downstream"] _UpperCAmelCase = WavaVecaConfig.from_pretrained(_lowerCAmelCase ) _UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained( _lowerCAmelCase , return_attention_mask=_lowerCAmelCase , do_normalize=_lowerCAmelCase ) _UpperCAmelCase = hf_config.architectures[0] if arch.endswith("ForSequenceClassification" ): _UpperCAmelCase = convert_classification(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) elif arch.endswith("ForAudioFrameClassification" ): _UpperCAmelCase = convert_diarization(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) elif arch.endswith("ForXVector" ): _UpperCAmelCase = convert_xvector(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: raise NotImplementedError(F'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: _UpperCAmelCase = checkpoint["Featurizer"]["weights"] hf_feature_extractor.save_pretrained(_lowerCAmelCase ) hf_model.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." 
) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") __lowerCAmelCase = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
import warnings

from ...utils import logging
from .image_processing_deit import DeiTImageProcessor


__lowerCAmelCase = logging.get_logger(__name__)


class __SCREAMING_SNAKE_CASE ( lowercase):
    def __init__( self : Dict , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : str ):
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead." , __UpperCamelCase , )
        super().__init__(*__UpperCamelCase , **__UpperCamelCase )
def __lowerCamelCase ( _lowerCAmelCase ) -> str:
    _UpperCAmelCase = []
    _UpperCAmelCase = set({"(", "[", "{"} )
    _UpperCAmelCase = set({")", "]", "}"} )
    _UpperCAmelCase = {"{": "}", "[": "]", "(": ")"}
    for i in range(len(_lowerCAmelCase ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(_lowerCAmelCase ) == 0 or (len(_lowerCAmelCase ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(_lowerCAmelCase ) == 0


def __lowerCamelCase ( ) -> str:
    _UpperCAmelCase = input("Enter sequence of brackets: " )
    if is_balanced(_lowerCAmelCase ):
        print(_lowerCAmelCase , "is balanced" )
    else:
        print(_lowerCAmelCase , "is not balanced" )


if __name__ == "__main__":
    main()
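Because the row above renames both functions to `__lowerCamelCase`, the internal calls to `is_balanced` and `main` dangle. A de-obfuscated sketch of the same stack-based matching, with the names restored from those call sites (an assumption), plus quick checks:

def is_balanced(s: str) -> bool:
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    stack = []
    for ch in s:
        if ch in open_to_closed:
            stack.append(ch)
        elif ch in ")]}" and (not stack or open_to_closed[stack.pop()] != ch):
            return False
    return not stack

assert is_balanced("{[()]}")
assert not is_balanced("([)]")
assert not is_balanced("((")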
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
__lowerCAmelCase = "."


if __name__ == "__main__":
    __lowerCAmelCase = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    __lowerCAmelCase = []
    __lowerCAmelCase = []
    with open(doctest_file_path) as fp:
        for line in fp:
            __lowerCAmelCase = line.strip()
            __lowerCAmelCase = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        __lowerCAmelCase = "\n".join(non_existent_paths)
        raise ValueError(F'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> tuple[float, float]:
    # Check if the input is valid
    if not len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == 3:
        raise ValueError("Please enter a valid equation." )
    if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero." )

    # Extract the coefficients
    _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa
    _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = equationa

    # Calculate the determinants of the matrices
    _UpperCAmelCase = aa * ba - aa * ba
    _UpperCAmelCase = ca * ba - ca * ba
    _UpperCAmelCase = aa * ca - aa * ca

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)" )
        else:
            raise ValueError("No solution. (Inconsistent system)" )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Consistent system)
            return (0.0, 0.0)
        else:
            _UpperCAmelCase = determinant_x / determinant
            _UpperCAmelCase = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
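As a worked check of the determinant formulas above, take x + 2y = 3 and 2x + y = 3 (readable names substituted for the obfuscated ones):

a1, b1, c1 = 1, 2, 3                  # x + 2y = 3
a2, b2, c2 = 2, 1, 3                  # 2x + y = 3
determinant = a1 * b2 - a2 * b1       # 1*1 - 2*2 = -3
determinant_x = c1 * b2 - c2 * b1     # 3*1 - 3*2 = -3
determinant_y = a1 * c2 - a2 * c1     # 1*3 - 2*3 = -3
x, y = determinant_x / determinant, determinant_y / determinant
assert (x, y) == (1.0, 1.0)           # both equations are satisfied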
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class __SCREAMING_SNAKE_CASE : def __init__( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Any=13 , __UpperCamelCase : str=7 , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Tuple=True , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Optional[int]=99 , __UpperCamelCase : Optional[Any]=32 , __UpperCamelCase : int=5 , __UpperCamelCase : Optional[int]=4 , __UpperCamelCase : Optional[int]=37 , __UpperCamelCase : List[Any]="gelu" , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Dict=512 , __UpperCamelCase : List[str]=16 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : Dict=0.02 , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : Any=4 , __UpperCamelCase : List[Any]=None , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ): return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , ) 
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] ): _UpperCAmelCase = LlamaModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase ) _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : Any , ): _UpperCAmelCase = True _UpperCAmelCase = LlamaModel(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model( __UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , ) _UpperCAmelCase = model( __UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , ) _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , ): _UpperCAmelCase = LlamaForCausalLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , ): _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = LlamaForCausalLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() # first forward pass _UpperCAmelCase = model( __UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase , ) _UpperCAmelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) _UpperCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) _UpperCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 ) _UpperCAmelCase = model( __UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0] _UpperCAmelCase = model( __UpperCamelCase , attention_mask=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , encoder_attention_mask=__UpperCamelCase , 
past_key_values=__UpperCamelCase , output_hidden_states=__UpperCamelCase , )["hidden_states"][0] # select random slice _UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() _UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach() _UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : Any = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () __SCREAMING_SNAKE_CASE : int = (LlamaForCausalLM,) if is_torch_available() else () __SCREAMING_SNAKE_CASE : Optional[Any] = ( { """feature-extraction""": LlamaModel, """text-classification""": LlamaForSequenceClassification, """text-generation""": LlamaForCausalLM, """zero-shot""": LlamaForSequenceClassification, } if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : Optional[int] = False def UpperCAmelCase__ ( self : int ): _UpperCAmelCase = LlamaModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 ) def UpperCAmelCase__ ( self : Optional[int] ): self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCAmelCase = type self.model_tester.create_and_check_model(*__UpperCamelCase ) def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = input_dict["input_ids"] _UpperCAmelCase = input_ids.ne(1 ).to(__UpperCamelCase ) _UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _UpperCAmelCase = LlamaForSequenceClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = "single_label_classification" _UpperCAmelCase = input_dict["input_ids"] _UpperCAmelCase = input_ids.ne(1 ).to(__UpperCamelCase ) _UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _UpperCAmelCase = LlamaForSequenceClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase 
) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase__ ( self : str ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = 3 _UpperCAmelCase = "multi_label_classification" _UpperCAmelCase = input_dict["input_ids"] _UpperCAmelCase = input_ids.ne(1 ).to(__UpperCamelCase ) _UpperCAmelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) _UpperCAmelCase = LlamaForSequenceClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("LLaMA buffers include complex numbers, which breaks this test" ) def UpperCAmelCase__ ( self : int ): pass @parameterized.expand([("linear",), ("dynamic",)] ) def UpperCAmelCase__ ( self : Any , __UpperCamelCase : List[Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase = ids_tensor([1, 10] , config.vocab_size ) _UpperCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _UpperCAmelCase = LlamaModel(__UpperCamelCase ) original_model.to(__UpperCamelCase ) original_model.eval() _UpperCAmelCase = original_model(__UpperCamelCase ).last_hidden_state _UpperCAmelCase = original_model(__UpperCamelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _UpperCAmelCase = {"type": scaling_type, "factor": 10.0} _UpperCAmelCase = LlamaModel(__UpperCamelCase ) scaled_model.to(__UpperCamelCase ) scaled_model.eval() _UpperCAmelCase = scaled_model(__UpperCamelCase ).last_hidden_state _UpperCAmelCase = scaled_model(__UpperCamelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-5 ) ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase): @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" 
) @slow def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] _UpperCAmelCase = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf" , device_map="auto" ) _UpperCAmelCase = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 _UpperCAmelCase = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off _UpperCAmelCase = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1e-5 , rtol=1e-5 ) @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" ) @slow def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] _UpperCAmelCase = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf" , device_map="auto" ) _UpperCAmelCase = model(torch.tensor(__UpperCamelCase ) ) # Expected mean on dim = -1 _UpperCAmelCase = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off _UpperCAmelCase = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1e-5 , rtol=1e-5 ) @unittest.skip("Logits are not exactly the same, once we fix the instabalities somehow, will update!" ) @slow def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] _UpperCAmelCase = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf" , device_map="auto" ) _UpperCAmelCase = model(torch.tensor(__UpperCamelCase ) ) # Expected mean on dim = -1 _UpperCAmelCase = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off _UpperCAmelCase = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1e-2 , rtol=1e-2 ) @unittest.skip( "Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test" ) @slow def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] _UpperCAmelCase = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf" , device_map="auto" ) _UpperCAmelCase = model(torch.tensor(__UpperCamelCase ) ) _UpperCAmelCase = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , __UpperCamelCase , atol=1e-2 , rtol=1e-2 ) # fmt: off _UpperCAmelCase = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCamelCase , atol=1e-5 , rtol=1e-5 ) @unittest.skip("Model is curently gated" ) @slow def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi" _UpperCAmelCase = "Simply put, the theory of relativity states that " _UpperCAmelCase = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf" ) _UpperCAmelCase = tokenizer.encode(__UpperCamelCase , return_tensors="pt" ) _UpperCAmelCase = LlamaForCausalLM.from_pretrained( "meta-llama/Llama-2-13b-chat-hf" , device_map="sequential" , use_safetensors=__UpperCamelCase ) # greedy generation outputs _UpperCAmelCase = model.generate(__UpperCamelCase , max_new_tokens=64 , top_p=__UpperCamelCase , temperature=1 , do_sample=__UpperCamelCase ) _UpperCAmelCase = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase )
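The RoPE-scaling test above drives everything through the config object. Stripped of the test harness, the pattern it exercises is roughly this (the small config values are illustrative; `rope_scaling` is the field the test sets):

from transformers import LlamaConfig, LlamaModel

config = LlamaConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=64,
)
config.rope_scaling = {"type": "linear", "factor": 10.0}  # as in the parameterized test
model = LlamaModel(config)  # randomly initialized; no weights are downloaded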
import argparse

import torch

from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging


logging.set_verbosity_info()


def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
    # Initialise PyTorch model
    _UpperCAmelCase = RemBertConfig.from_json_file(_lowerCAmelCase )
    print("Building PyTorch model from configuration: {}".format(str(_lowerCAmelCase ) ) )
    _UpperCAmelCase = RemBertModel(_lowerCAmelCase )

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )

    # Save pytorch-model
    print("Save PyTorch model to {}".format(_lowerCAmelCase ) )
    torch.save(model.state_dict() , _lowerCAmelCase )


if __name__ == "__main__":
    __lowerCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    __lowerCAmelCase = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __SCREAMING_SNAKE_CASE : @staticmethod def UpperCAmelCase__ ( *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ): pass @is_pipeline_test @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase): __SCREAMING_SNAKE_CASE : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ): _UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) _UpperCAmelCase = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ): _UpperCAmelCase = vqa_pipeline(__UpperCamelCase , top_k=1 ) self.assertEqual( __UpperCamelCase , [ [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}], [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}], ] , ) @require_torch def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) _UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png" _UpperCAmelCase = "How many cats are there?" _UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question="How many cats are there?" , top_k=2 ) self.assertEqual( __UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] ) _UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( __UpperCamelCase , [{"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}, {"score": ANY(__UpperCamelCase ), "answer": ANY(__UpperCamelCase )}] ) @slow @require_torch def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" ) _UpperCAmelCase = "./tests/fixtures/tests_samples/COCO/000000039769.png" _UpperCAmelCase = "How many cats are there?" _UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) _UpperCAmelCase = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) _UpperCAmelCase = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(__UpperCamelCase , decimals=4 ) , [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2 , ) @require_tf @unittest.skip("Visual question answering not implemented in TF" ) def UpperCAmelCase__ ( self : Optional[int] ): pass
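Outside the test harness, the pipeline call pattern exercised above looks roughly like this; the model name, image path, question, and expected scores are taken verbatim from the slow test:

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
result = vqa(
    image="./tests/fixtures/tests_samples/COCO/000000039769.png",
    question="How many cats are there?",
    top_k=2,
)
# result is approximately [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]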
from itertools import product


def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> list[int]:
    _UpperCAmelCase = sides_number
    _UpperCAmelCase = max_face_number * dice_number
    _UpperCAmelCase = [0] * (max_total + 1)

    _UpperCAmelCase = 1
    _UpperCAmelCase = range(_lowerCAmelCase , max_face_number + 1 )
    for dice_numbers in product(_lowerCAmelCase , repeat=_lowerCAmelCase ):
        _UpperCAmelCase = sum(_lowerCAmelCase )
        totals_frequencies[total] += 1

    return totals_frequencies


def __lowerCamelCase ( ) -> float:
    _UpperCAmelCase = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    _UpperCAmelCase = total_frequency_distribution(
        sides_number=6 , dice_number=6 )

    _UpperCAmelCase = 0
    _UpperCAmelCase = 9
    _UpperCAmelCase = 4 * 9
    _UpperCAmelCase = 6
    for peter_total in range(_lowerCAmelCase , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )

    _UpperCAmelCase = (4**9) * (6**6)
    _UpperCAmelCase = peter_wins_count / total_games_number
    _UpperCAmelCase = round(_lowerCAmelCase , ndigits=7 )
    return rounded_peter_win_probability


if __name__ == "__main__":
    print(F'''{solution() = }''')
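A quick sanity check on the constants in this solution: Peter's nine 4-sided dice give totals from 9 to 36, Colin's six 6-sided dice give totals from 6 to 36, and the number of equally likely games, the denominator of the win probability, is:

assert 4**9 == 262_144 and 6**6 == 46_656
assert (4**9) * (6**6) == 12_230_590_464  # total number of equally likely games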
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput __lowerCAmelCase = 8 def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=BITS ) -> Optional[int]: _UpperCAmelCase = x.device _UpperCAmelCase = (x * 255).int().clamp(0 , 255 ) _UpperCAmelCase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_lowerCAmelCase ) _UpperCAmelCase = rearrange(_lowerCAmelCase , "d -> d 1 1" ) _UpperCAmelCase = rearrange(_lowerCAmelCase , "b c h w -> b c 1 h w" ) _UpperCAmelCase = ((x & mask) != 0).float() _UpperCAmelCase = rearrange(_lowerCAmelCase , "b c d h w -> b (c d) h w" ) _UpperCAmelCase = bits * 2 - 1 return bits def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=BITS ) -> Optional[int]: _UpperCAmelCase = x.device _UpperCAmelCase = (x > 0).int() _UpperCAmelCase = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_lowerCAmelCase , dtype=torch.intaa ) _UpperCAmelCase = rearrange(_lowerCAmelCase , "d -> d 1 1" ) _UpperCAmelCase = rearrange(_lowerCAmelCase , "b (c d) h w -> b c d h w" , d=8 ) _UpperCAmelCase = reduce(x * mask , "b c d h w -> b c h w" , "sum" ) return (dec / 255).clamp(0.0 , 1.0 ) def __lowerCamelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0.0 , _lowerCAmelCase = True , _lowerCAmelCase=None , _lowerCAmelCase = True , ) -> Union[DDIMSchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) _UpperCAmelCase = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas _UpperCAmelCase = self.alphas_cumprod[timestep] _UpperCAmelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod _UpperCAmelCase = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" _UpperCAmelCase = self.bit_scale if self.config.clip_sample: _UpperCAmelCase = torch.clamp(_lowerCAmelCase , -scale , _lowerCAmelCase ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) _UpperCAmelCase = self._get_variance(_lowerCAmelCase , _lowerCAmelCase ) _UpperCAmelCase = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide _UpperCAmelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _UpperCAmelCase = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _UpperCAmelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 _UpperCAmelCase = model_output.device if torch.is_tensor(_lowerCAmelCase ) else "cpu" _UpperCAmelCase = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_lowerCAmelCase ).to(_lowerCAmelCase ) _UpperCAmelCase = self._get_variance(_lowerCAmelCase , _lowerCAmelCase ) ** 0.5 * eta * noise _UpperCAmelCase = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase ) def __lowerCamelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="epsilon" , _lowerCAmelCase=None , _lowerCAmelCase = True , ) -> Union[DDPMSchedulerOutput, Tuple]: _UpperCAmelCase = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: _UpperCAmelCase , _UpperCAmelCase = torch.split(_lowerCAmelCase , sample.shape[1] , dim=1 ) else: _UpperCAmelCase = None # 1. compute alphas, betas _UpperCAmelCase = self.alphas_cumprod[t] _UpperCAmelCase = self.alphas_cumprod[t - 1] if t > 0 else self.one _UpperCAmelCase = 1 - alpha_prod_t _UpperCAmelCase = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": _UpperCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": _UpperCAmelCase = model_output else: raise ValueError(F'''Unsupported prediction_type {prediction_type}.''' ) # 3. Clip "predicted x_0" _UpperCAmelCase = self.bit_scale if self.config.clip_sample: _UpperCAmelCase = torch.clamp(_lowerCAmelCase , -scale , _lowerCAmelCase ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _UpperCAmelCase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t _UpperCAmelCase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _UpperCAmelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _UpperCAmelCase = 0 if t > 0: _UpperCAmelCase = torch.randn( model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_lowerCAmelCase ).to(model_output.device ) _UpperCAmelCase = (self._get_variance(_lowerCAmelCase , predicted_variance=_lowerCAmelCase ) ** 0.5) * noise _UpperCAmelCase = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=_lowerCAmelCase , pred_original_sample=_lowerCAmelCase ) class __SCREAMING_SNAKE_CASE ( lowercase): def __init__( self : str , __UpperCamelCase : UNetaDConditionModel , __UpperCamelCase : Union[DDIMScheduler, DDPMScheduler] , __UpperCamelCase : Optional[float] = 1.0 , ): super().__init__() _UpperCAmelCase = bit_scale _UpperCAmelCase = ( ddim_bit_scheduler_step if isinstance(__UpperCamelCase , __UpperCamelCase ) else ddpm_bit_scheduler_step ) self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase ) @torch.no_grad() def __call__( self : List[str] , __UpperCamelCase : Optional[int] = 256 , __UpperCamelCase : Optional[int] = 256 , __UpperCamelCase : Optional[int] = 50 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , **__UpperCamelCase : int , ): _UpperCAmelCase = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=__UpperCamelCase , ) _UpperCAmelCase = decimal_to_bits(__UpperCamelCase ) * self.bit_scale _UpperCAmelCase = latents.to(self.device ) self.scheduler.set_timesteps(__UpperCamelCase ) for t in self.progress_bar(self.scheduler.timesteps ): # predict the noise residual _UpperCAmelCase = self.unet(__UpperCamelCase , __UpperCamelCase ).sample # compute the previous noisy sample x_t -> x_t-1 _UpperCAmelCase = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample _UpperCAmelCase = bits_to_decimal(__UpperCamelCase ) if output_type == "pil": _UpperCAmelCase = self.numpy_to_pil(__UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=__UpperCamelCase )
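The `decimal_to_bits`/`bits_to_decimal` pair used by this pipeline is lossless up to 8-bit quantization. A self-contained round-trip sketch of the same bit-plane decomposition (shapes and constants mirror the two helpers at the top of the file):

import torch

x = torch.rand(2, 3, 8, 8)                                   # images in [0, 1]
q = (x * 255).int().clamp(0, 255)                            # 8-bit quantization
powers = torch.tensor([128, 64, 32, 16, 8, 4, 2, 1], dtype=torch.int32)
mask = powers.view(1, 1, 8, 1, 1)
bits = (q.unsqueeze(2) & mask) != 0                          # (b, c, 8, h, w) bit planes
recovered = (bits.int() * mask).sum(dim=2)                   # reassemble the bytes
assert (recovered == q).all()                                # decoding recovers the quantized image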
684
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : str = (UniPCMultistepScheduler,) __SCREAMING_SNAKE_CASE : Dict = (("""num_inference_steps""", 25),) def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Any ): _UpperCAmelCase = { "num_train_timesteps": 1_000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "solver_order": 2, "solver_type": "bh2", } config.update(**__UpperCamelCase ) return config def UpperCAmelCase__ ( self : int , __UpperCamelCase : Any=0 , **__UpperCamelCase : Any ): _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase = sample, sample for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any]=0 , **__UpperCamelCase : List[Any] ): _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not 
identical" def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Dict=None , **__UpperCamelCase : Optional[Any] ): if scheduler is None: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 10 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop("num_inference_steps" , __UpperCamelCase ) for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample if num_inference_steps is not None and hasattr(__UpperCamelCase , "set_timesteps" ): scheduler.set_timesteps(__UpperCamelCase ) elif num_inference_steps is not None and not hasattr(__UpperCamelCase , "set_timesteps" ): _UpperCAmelCase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _UpperCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.10] _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] _UpperCAmelCase = scheduler.timesteps[5] _UpperCAmelCase = scheduler.timesteps[6] _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def UpperCAmelCase__ ( self : Union[str, Any] ): # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCAmelCase = UniPCMultistepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2464 ) < 1e-3 _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2464 ) < 1e-3 def UpperCAmelCase__ ( self : str ): for timesteps in [25, 50, 100, 999, 1_000]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def UpperCAmelCase__ ( self : int ): self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def 
UpperCAmelCase__ ( self : int ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def UpperCAmelCase__ ( self : int ): for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , ) _UpperCAmelCase = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers" def UpperCAmelCase__ ( self : Optional[int] ): self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] ): for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2464 ) < 1e-3 def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = self.full_loop(prediction_type="v_prediction" ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.1014 ) < 1e-3 def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 10 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample assert sample.dtype == torch.floataa def UpperCAmelCase__ ( self : str , **__UpperCamelCase : Optional[Any] ): for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
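The scheduler config round-trip these tests exercise is what lets you swap an existing pipeline's scheduler for UniPC in one line. A short usage sketch (the hub model id is illustrative):

import torch
from diffusers import DiffusionPipeline, UniPCMultistepScheduler

pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
# Rebuild the scheduler from the pipeline's existing config, as the tests above do.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
image = pipe("a photo of an astronaut", num_inference_steps=20).images[0]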
684
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
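With this lazy structure, importing the package stays cheap: the names listed in _import_structure are only resolved on first attribute access. A small illustration, assuming a transformers build that ships NLLB-MoE:

from transformers import NllbMoeConfig  # resolved lazily via _LazyModule

config = NllbMoeConfig(num_experts=8)
print(config.model_type)  # "nllb-moe"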
684
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        # classic O(n^3) relaxation over all intermediate nodes k
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
684
1
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Sort a list in place with cocktail shaker (bidirectional bubble) sort.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # backward pass: bubble the minimum toward the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # forward pass: bubble the maximum toward the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
684
import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : Dict = VQModel __SCREAMING_SNAKE_CASE : Optional[int] = """sample""" @property def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : Optional[int]=(32, 32) ): _UpperCAmelCase = 4 _UpperCAmelCase = 3 _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase ) return {"sample": image} @property def UpperCAmelCase__ ( self : Tuple ): return (3, 32, 32) @property def UpperCAmelCase__ ( self : str ): return (3, 32, 32) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } _UpperCAmelCase = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase__ ( self : Dict ): pass def UpperCAmelCase__ ( self : str ): pass def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" , output_loading_info=__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(__UpperCamelCase ) _UpperCAmelCase = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = VQModel.from_pretrained("fusing/vqgan-dummy" ) model.to(__UpperCamelCase ).eval() torch.manual_seed(0 ) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0 ) _UpperCAmelCase = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size ) _UpperCAmelCase = image.to(__UpperCamelCase ) with torch.no_grad(): _UpperCAmelCase = model(__UpperCamelCase ).sample _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off _UpperCAmelCase = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143] ) # fmt: on self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
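A minimal forward pass matching the tiny config used in the tests above — a sketch, assuming any diffusers version that exposes VQModel:

import torch
from diffusers import VQModel

model = VQModel(
    block_out_channels=[32, 64],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
    up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
    latent_channels=3,
)
sample = torch.randn(4, 3, 32, 32)
reconstruction = model(sample).sample  # encode, vector-quantize, decode
print(reconstruction.shape)  # torch.Size([4, 3, 32, 32])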
684
1
import inspect
import unittest


class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # normalize backend names to the package names used in the deps table
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
684
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
684
1
import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging __lowerCAmelCase = logging.get_logger(__name__) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False ) -> Tuple: try: import torch # noqa: F401 except ImportError: logger.error( "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see" " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" " instructions." ) raise if not is_sharded: _UpperCAmelCase = os.path.abspath(_lowerCAmelCase ) logger.info(F'''Loading PyTorch weights from {pt_path}''' ) _UpperCAmelCase = torch.load(_lowerCAmelCase , map_location="cpu" ) logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' ) _UpperCAmelCase = convert_pytorch_state_dict_to_flax(_lowerCAmelCase , _lowerCAmelCase ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files _UpperCAmelCase = convert_pytorch_sharded_state_dict_to_flax(_lowerCAmelCase , _lowerCAmelCase ) return flax_state_dict def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> (Tuple[str], np.ndarray): def is_key_or_prefix_key_in_dict(_lowerCAmelCase ) -> bool: return len(set(_lowerCAmelCase ) & {key, (model_prefix,) + key} ) > 0 # layer norm _UpperCAmelCase = pt_tuple_key[:-1] + ("scale",) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_lowerCAmelCase ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean _UpperCAmelCase = pt_tuple_key[:-1] + ("mean",) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var _UpperCAmelCase = pt_tuple_key[:-1] + ("var",) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ): return renamed_pt_tuple_key, pt_tensor # embedding _UpperCAmelCase = pt_tuple_key[:-1] + ("embedding",) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_lowerCAmelCase ): return renamed_pt_tuple_key, pt_tensor # conv layer _UpperCAmelCase = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ): _UpperCAmelCase = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer _UpperCAmelCase = pt_tuple_key[:-1] + ("kernel",) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ): _UpperCAmelCase = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight _UpperCAmelCase = pt_tuple_key[:-1] + ("weight",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias _UpperCAmelCase = pt_tuple_key[:-1] + ("bias",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 _UpperCAmelCase = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): _UpperCAmelCase = pt_tuple_key[-2] + "_g" elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): _UpperCAmelCase = pt_tuple_key[-2] + "_v" if name is not None: _UpperCAmelCase = pt_tuple_key[:-3] + 
(name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: # convert pytorch tensor to numpy _UpperCAmelCase = {k: v.numpy() for k, v in pt_state_dict.items()} _UpperCAmelCase = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: _UpperCAmelCase = flax_model.params["params"] else: _UpperCAmelCase = flax_model.params _UpperCAmelCase = flatten_dict(_lowerCAmelCase ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: _UpperCAmelCase = flatten_dict(flax_model.params["batch_stats"] ) random_flax_state_dict.update(_lowerCAmelCase ) _UpperCAmelCase = {} _UpperCAmelCase = (model_prefix not in flax_model_params) and ( model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()} ) _UpperCAmelCase = (model_prefix in flax_model_params) and ( model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): _UpperCAmelCase = tuple(pt_key.split("." ) ) # remove base model prefix if necessary _UpperCAmelCase = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: _UpperCAmelCase = pt_tuple_key[1:] # Correctly rename weight parameters _UpperCAmelCase , _UpperCAmelCase = rename_key_and_reshape_tensor( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # add model prefix if necessary _UpperCAmelCase = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: _UpperCAmelCase = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: _UpperCAmelCase = jnp.asarray(_lowerCAmelCase ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(_lowerCAmelCase , _lowerCAmelCase ) continue # also add unexpected weight so that warning is thrown _UpperCAmelCase = jnp.asarray(_lowerCAmelCase ) else: # also add unexpected weight so that warning is thrown _UpperCAmelCase = jnp.asarray(_lowerCAmelCase ) return unflatten_dict(_lowerCAmelCase ) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: import torch # Load the index _UpperCAmelCase = {} for shard_file in shard_filenames: # load using msgpack utils _UpperCAmelCase = torch.load(_lowerCAmelCase ) _UpperCAmelCase = {k: v.numpy() for k, v in pt_state_dict.items()} _UpperCAmelCase = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: _UpperCAmelCase = flax_model.params["params"] _UpperCAmelCase = flatten_dict(_lowerCAmelCase ) random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) ) else: _UpperCAmelCase = flax_model.params _UpperCAmelCase = flatten_dict(_lowerCAmelCase ) _UpperCAmelCase = (model_prefix not in flax_model_params) and ( model_prefix in {k.split("." 
)[0] for k in pt_state_dict.keys()} ) _UpperCAmelCase = (model_prefix in flax_model_params) and ( model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): _UpperCAmelCase = tuple(pt_key.split("." ) ) # remove base model prefix if necessary _UpperCAmelCase = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: _UpperCAmelCase = pt_tuple_key[1:] # Correctly rename weight parameters _UpperCAmelCase , _UpperCAmelCase = rename_key_and_reshape_tensor( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # add model prefix if necessary _UpperCAmelCase = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: _UpperCAmelCase = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: _UpperCAmelCase = jnp.asarray(_lowerCAmelCase ) continue if "var" in flax_key[-1]: _UpperCAmelCase = jnp.asarray(_lowerCAmelCase ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(_lowerCAmelCase , _lowerCAmelCase ) continue # also add unexpected weight so that warning is thrown _UpperCAmelCase = jnp.asarray(_lowerCAmelCase ) else: # also add unexpected weight so that warning is thrown _UpperCAmelCase = jnp.asarray(_lowerCAmelCase ) return unflatten_dict(_lowerCAmelCase ) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: _UpperCAmelCase = os.path.abspath(_lowerCAmelCase ) logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' ) # import correct flax class _UpperCAmelCase = getattr(_lowerCAmelCase , "Flax" + model.__class__.__name__ ) # load flax weight dict with open(_lowerCAmelCase , "rb" ) as state_f: try: _UpperCAmelCase = from_bytes(_lowerCAmelCase , state_f.read() ) except UnpicklingError: raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' ) return load_flax_weights_in_pytorch_model(_lowerCAmelCase , _lowerCAmelCase ) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Tuple: try: import torch # noqa: F401 except ImportError: logger.error( "Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see" " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation" " instructions." ) raise # check if we have bf16 weights _UpperCAmelCase = flatten_dict(jax.tree_util.tree_map(lambda _lowerCAmelCase : x.dtype == jnp.bfloataa , _lowerCAmelCase ) ).values() if any(_lowerCAmelCase ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` " "before loading those in PyTorch model." 
) _UpperCAmelCase = jax.tree_util.tree_map( lambda _lowerCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _lowerCAmelCase ) _UpperCAmelCase = flatten_dict(_lowerCAmelCase ) _UpperCAmelCase = pt_model.state_dict() _UpperCAmelCase = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()} ) _UpperCAmelCase = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys _UpperCAmelCase = [] _UpperCAmelCase = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): _UpperCAmelCase = flax_key_tuple[0] == pt_model.base_model_prefix _UpperCAmelCase = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: _UpperCAmelCase = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: _UpperCAmelCase = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_lowerCAmelCase ) not in pt_model_dict: # conv layer _UpperCAmelCase = flax_key_tuple[:-1] + ("weight",) _UpperCAmelCase = jnp.transpose(_lowerCAmelCase , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ) not in pt_model_dict: # linear layer _UpperCAmelCase = flax_key_tuple[:-1] + ("weight",) _UpperCAmelCase = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: _UpperCAmelCase = flax_key_tuple[:-1] + ("weight",) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: _UpperCAmelCase = flax_key_tuple[:-1] + ("running_mean",) elif "var" in flax_key_tuple[-1]: _UpperCAmelCase = flax_key_tuple[:-1] + ("running_var",) if "batch_stats" in flax_state: _UpperCAmelCase = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: _UpperCAmelCase = ".".join(_lowerCAmelCase ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. _UpperCAmelCase = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: _UpperCAmelCase = key.split("." ) _UpperCAmelCase = None if key_components[-3::2] == ["parametrizations", "original0"]: _UpperCAmelCase = key_components[-2] + "_g" elif key_components[-3::2] == ["parametrizations", "original1"]: _UpperCAmelCase = key_components[-2] + "_v" if name is not None: _UpperCAmelCase = key_components[:-3] + [name] _UpperCAmelCase = ".".join(_lowerCAmelCase ) _UpperCAmelCase = key if flax_key in special_pt_names: _UpperCAmelCase = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( F'''Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected ''' F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict _UpperCAmelCase = np.asarray(_lowerCAmelCase ) if not isinstance(_lowerCAmelCase , np.ndarray ) else flax_tensor _UpperCAmelCase = torch.from_numpy(_lowerCAmelCase ) # remove from missing keys missing_keys.remove(_lowerCAmelCase ) else: # weight is not expected by PyTorch model unexpected_keys.append(_lowerCAmelCase ) pt_model.load_state_dict(_lowerCAmelCase ) # re-transform missing_keys to list _UpperCAmelCase = list(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: logger.warning( "Some weights of the Flax model were not used when initializing the PyTorch model" F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This" F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a" " FlaxBertForSequenceClassification model)." ) else: logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' ) if len(_lowerCAmelCase ) > 0: logger.warning( F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' " use it for predictions and inference." ) else: logger.warning( F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n''' "If your task is similar to the task the model of the checkpoint was trained on, " F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' ) return pt_model
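In upstream transformers these converters live in modeling_flax_pytorch_utils and are usually driven through from_pretrained rather than called directly; the public names are assumed from the library here, since the definitions above are obfuscated. A typical round trip:

from transformers import BertModel, FlaxBertModel

# Save a PyTorch checkpoint, then load it into Flax; `from_pt=True` routes through
# the state-dict converters defined above.
BertModel.from_pretrained("bert-base-uncased").save_pretrained("./bert-pt")
flax_model = FlaxBertModel.from_pretrained("./bert-pt", from_pt=True)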
684
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase = 10 def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = [1, 2, 3, 4] _UpperCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase ) def UpperCAmelCase__ ( self : List[Any] ): _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase ) def UpperCAmelCase__ ( self : int ): _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(__UpperCamelCase , self.block_size , 0 ) , __UpperCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this." _UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase ) self.assertEqual(__UpperCamelCase , [] ) def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = "" _UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase ) self.assertEqual(__UpperCamelCase , [] ) self.assertEqual(__UpperCamelCase , [] ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = ( "It was the year of Our Lord one thousand seven hundred and " "seventy-five\n\nSpiritual revelations were conceded to England " "at that favoured period, as at this.\n@highlight\n\nIt was the best of times" ) _UpperCAmelCase , _UpperCAmelCase = process_story(__UpperCamelCase ) _UpperCAmelCase = [ "It was the year of Our Lord one thousand seven hundred and seventy-five.", "Spiritual revelations were conceded to England at that favoured period, as at this.", ] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = ["It was the best of times."] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = torch.tensor([1, 2, 3, 4] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(__UpperCamelCase , 0 ).numpy() , expected.numpy() ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCamelCase , 23 ).numpy() , expected.numpy() ) def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) _UpperCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(__UpperCamelCase , 1 ).numpy() , expected.numpy() ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = 101 _UpperCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) _UpperCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) _UpperCAmelCase = compute_token_type_ids(__UpperCamelCase , __UpperCamelCase ) np.testing.assert_array_equal(__UpperCamelCase , __UpperCamelCase )
684
1
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # insert in descending order at the head, yielding an ascending list
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    # prints: -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
684
from __future__ import annotations

from collections import namedtuple


def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Given any two of voltage, current and power (pass the unknown as 0),
    compute the missing quantity from P = V * I.

    >>> electric_power(voltage=0, current=2, power=5)
    result(name='voltage', value=2.5)
    >>> electric_power(voltage=2, current=2, power=0)
    result(name='power', value=4.0)
    """
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError("Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
684
1
g = 9.80665  # standard gravitational acceleration (m/s^2)


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """
    Buoyant force F = rho * g * V.

    >>> archimedes_principle(fluid_density=1000, volume=0.5)
    4903.325
    """
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
684
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract start/end timestamps and duration (minutes) from one workflow job."""
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info


def get_job_time(workflow_run_id, token=None):
    """Collect timing info for every job in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # the API pages 100 jobs at a time; fetch the remaining pages
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
684
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
684
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel __lowerCAmelCase = { "gwf-440k": { "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 6_5_5_3_6, }, "jmann-small-190k": { "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 6_5_5_3_6, }, "jmann-large-580k": { "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt", "sample_rate": 4_8_0_0_0, "sample_size": 1_3_1_0_7_2, }, "maestro-uncond-150k": { "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, "unlocked-uncond-250k": { "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, "honk-140k": { "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt", "sample_rate": 1_6_0_0_0, "sample_size": 6_5_5_3_6, }, } def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]: return torch.atana(_lowerCAmelCase , _lowerCAmelCase ) / math.pi * 2 def __lowerCamelCase ( _lowerCAmelCase ) -> Union[str, Any]: _UpperCAmelCase = torch.sin(t * math.pi / 2 ) ** 2 _UpperCAmelCase = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(_lowerCAmelCase , _lowerCAmelCase ) class __SCREAMING_SNAKE_CASE ( lowercase): pass class __SCREAMING_SNAKE_CASE ( nn.Module): def __init__( self : str , __UpperCamelCase : Optional[int] ): super().__init__() _UpperCAmelCase = DiffusionAttnUnetaD(__UpperCamelCase , n_attn_layers=4 ) _UpperCAmelCase = deepcopy(self.diffusion ) _UpperCAmelCase = torch.quasirandom.SobolEngine(1 , scramble=__UpperCamelCase ) def __lowerCamelCase ( _lowerCAmelCase ) -> int: _UpperCAmelCase = MODELS_MAP[model_name]["url"] os.system(F'''wget {url} ./''' ) return F'''./{model_name}.ckpt''' __lowerCAmelCase = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", } __lowerCAmelCase = { "8": "resnets.0", "9": "attentions.0", "10": "resnets.1", "11": "attentions.1", "12": "resnets.2", "13": "attentions.2", } __lowerCAmelCase = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", "8": "resnets.3", "9": "attentions.3", "10": "resnets.4", "11": "attentions.4", "12": "resnets.5", "13": "attentions.5", } __lowerCAmelCase = { "0": "resnets.0", "1": "resnets.1", "2": "resnets.2", "4": "resnets.0", "5": "resnets.1", "6": "resnets.2", } __lowerCAmelCase = { "skip": "conv_skip", "main.0": "conv_1", "main.1": "group_norm_1", "main.3": "conv_2", "main.4": "group_norm_2", } __lowerCAmelCase = { "norm": "group_norm", "qkv_proj": ["query", "key", "value"], "out_proj": ["proj_attn"], } def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]: if name.startswith("skip" ): return name.replace("skip" , RES_CONV_MAP["skip"] ) # name has to be of format main.{digit} if not name.startswith("main." 
): raise ValueError(F'''ResConvBlock error with {name}''' ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[Any]: for key, value in ATTN_MAP.items(): if name.startswith(_lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ): return name.replace(_lowerCAmelCase , _lowerCAmelCase ) elif name.startswith(_lowerCAmelCase ): return [name.replace(_lowerCAmelCase , _lowerCAmelCase ) for v in value] raise ValueError(F'''Attn error with {name}''' ) def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=13 ) -> List[Any]: _UpperCAmelCase = input_string if string.split("." )[0] == "timestep_embed": return string.replace("timestep_embed" , "time_proj" ) _UpperCAmelCase = 0 if string.startswith("net.3." ): depth += 1 _UpperCAmelCase = string[6:] elif string.startswith("net." ): _UpperCAmelCase = string[4:] while string.startswith("main.7." ): depth += 1 _UpperCAmelCase = string[7:] if string.startswith("main." ): _UpperCAmelCase = string[5:] # mid block if string[:2].isdigit(): _UpperCAmelCase = string[:2] _UpperCAmelCase = string[2:] else: _UpperCAmelCase = string[0] _UpperCAmelCase = string[1:] if depth == max_depth: _UpperCAmelCase = MID_NUM_TO_LAYER[layer_num] _UpperCAmelCase = "mid_block" elif depth > 0 and int(_lowerCAmelCase ) < 7: _UpperCAmelCase = DOWN_NUM_TO_LAYER[layer_num] _UpperCAmelCase = F'''down_blocks.{depth}''' elif depth > 0 and int(_lowerCAmelCase ) > 7: _UpperCAmelCase = UP_NUM_TO_LAYER[layer_num] _UpperCAmelCase = F'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: _UpperCAmelCase = DEPTH_0_TO_LAYER[layer_num] _UpperCAmelCase = F'''up_blocks.{max_depth - 1}''' if int(_lowerCAmelCase ) > 3 else "down_blocks.0" if not string_left.startswith("." ): raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' ) _UpperCAmelCase = string_left[1:] if "resnets" in new_layer: _UpperCAmelCase = convert_resconv_naming(_lowerCAmelCase ) elif "attentions" in new_layer: _UpperCAmelCase = convert_attn_naming(_lowerCAmelCase ) _UpperCAmelCase = new_string_left if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): _UpperCAmelCase = prefix + "." + new_layer + "." + string_left else: _UpperCAmelCase = [prefix + "." + new_layer + "." + s for s in string_left] return new_string def __lowerCamelCase ( _lowerCAmelCase ) -> Optional[int]: _UpperCAmelCase = {} for k, v in state_dict.items(): if k.endswith("kernel" ): # up- and downsample layers, don't have trainable weights continue _UpperCAmelCase = rename(_lowerCAmelCase ) # check if we need to transform from Conv => Linear for attention if isinstance(_lowerCAmelCase , _lowerCAmelCase ): _UpperCAmelCase = transform_conv_attns(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) else: _UpperCAmelCase = v return new_state_dict def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: if len(_lowerCAmelCase ) == 1: if len(v.shape ) == 3: # weight _UpperCAmelCase = v[:, :, 0] else: # bias _UpperCAmelCase = v else: # qkv matrices _UpperCAmelCase = v.shape[0] _UpperCAmelCase = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: _UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape, :, 0] else: _UpperCAmelCase = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple: _UpperCAmelCase = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) _UpperCAmelCase = args.model_path.split("/" )[-1].split("." 
)[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' _UpperCAmelCase = download(_lowerCAmelCase ) _UpperCAmelCase = MODELS_MAP[model_name]["sample_rate"] _UpperCAmelCase = MODELS_MAP[model_name]["sample_size"] _UpperCAmelCase = Object() _UpperCAmelCase = sample_size _UpperCAmelCase = sample_rate _UpperCAmelCase = 0 _UpperCAmelCase = UNetaDModel(sample_size=_lowerCAmelCase , sample_rate=_lowerCAmelCase ) _UpperCAmelCase = diffusers_model.state_dict() _UpperCAmelCase = DiffusionUncond(_lowerCAmelCase ) orig_model.load_state_dict(torch.load(args.model_path , map_location=_lowerCAmelCase )["state_dict"] ) _UpperCAmelCase = orig_model.diffusion_ema.eval() _UpperCAmelCase = orig_model.state_dict() _UpperCAmelCase = rename_orig_weights(_lowerCAmelCase ) _UpperCAmelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) _UpperCAmelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(_lowerCAmelCase ) == 0, F'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("kernel" ) for k in list(_lowerCAmelCase ) ), F'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}''' if key == "time_proj.weight": _UpperCAmelCase = value.squeeze() _UpperCAmelCase = value diffusers_model.load_state_dict(_lowerCAmelCase ) _UpperCAmelCase = 100 _UpperCAmelCase = 33 _UpperCAmelCase = IPNDMScheduler(num_train_timesteps=_lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(_lowerCAmelCase ) _UpperCAmelCase = torch.randn([1, 2, config.sample_size] , generator=_lowerCAmelCase ).to(_lowerCAmelCase ) _UpperCAmelCase = torch.linspace(1 , 0 , steps + 1 , device=_lowerCAmelCase )[:-1] _UpperCAmelCase = get_crash_schedule(_lowerCAmelCase ) _UpperCAmelCase = DanceDiffusionPipeline(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase ) _UpperCAmelCase = torch.manual_seed(33 ) _UpperCAmelCase = pipe(num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase ).audios _UpperCAmelCase = sampling.iplms_sample(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , {} ) _UpperCAmelCase = generated.clamp(-1 , 1 ) _UpperCAmelCase = (generated - audio).abs().sum() _UpperCAmelCase = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("Diff sum" , _lowerCAmelCase ) print("Diff max" , _lowerCAmelCase ) assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/''' print(F'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") __lowerCAmelCase = parser.parse_args() main(args)
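Once converted, the checkpoint behaves like any other diffusers pipeline. A short generation sketch — the hub id is one of the officially converted Harmonai models and the parameters are illustrative:

import torch
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
output = pipe(audio_length_in_s=4.0, num_inference_steps=100, generator=torch.manual_seed(33))
audio = output.audios[0]  # numpy array, shape (channels, samples)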
684
1
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
684
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 __lowerCAmelCase = get_tests_dir("fixtures") class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCAmelCase__ ( self : Dict ): # A mock response for an HTTP head request to emulate server down _UpperCAmelCase = mock.Mock() _UpperCAmelCase = 500 _UpperCAmelCase = {} _UpperCAmelCase = HTTPError _UpperCAmelCase = {} # Download this model to make sure it's in the cache. _UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head: _UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" ) # This check we did call the fake head request mock_head.assert_called() def UpperCAmelCase__ ( self : List[Any] ): # This test is for deprecated behavior and can be removed in v5 _UpperCAmelCase = ViTImageProcessor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" ) def UpperCAmelCase__ ( self : Dict ): with self.assertRaises(__UpperCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder _UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" ) _UpperCAmelCase = AutoImageProcessor.from_pretrained( "hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" ) self.assertIsNotNone(__UpperCamelCase ) @is_staging_test class __SCREAMING_SNAKE_CASE ( unittest.TestCase): @classmethod def UpperCAmelCase__ ( cls : str ): _UpperCAmelCase = TOKEN HfFolder.save_token(__UpperCamelCase ) @classmethod def UpperCAmelCase__ ( cls : Optional[Any] ): try: delete_repo(token=cls._token , repo_id="test-image-processor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" ) except HTTPError: pass def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase ) image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="test-image-processor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __UpperCamelCase , repo_id="test-image-processor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = 
ViTImageProcessor.from_pretrained(__UpperCamelCase ) image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-image-processor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) def UpperCAmelCase__ ( self : int ): CustomImageProcessor.register_for_auto_class() _UpperCAmelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase ) image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , ) _UpperCAmelCase = AutoImageProcessor.from_pretrained( F'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__UpperCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
684
1
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index where pattern occurs in s; O(len(s) * len(pattern)) worst case."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position


if __name__ == "__main__":
    assert naive_pattern_search("ABCDEFG", "DE") == [3]
    print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
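For comparison, the same positions (including overlapping matches) can be collected with the standard library's str.find:

def find_all(s: str, pattern: str) -> list[int]:
    positions, i = [], s.find(pattern)
    while i != -1:
        positions.append(i)
        i = s.find(pattern, i + 1)  # resume just past the last match
    return positions


assert find_all("ABAAABCDBBABCDDEBCABC", "ABC") == [4, 10, 18]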
684
from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]: return getitem, k def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: return setitem, k, v def __lowerCamelCase ( _lowerCAmelCase ) -> str: return delitem, k def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) -> Optional[int]: try: return fun(_lowerCAmelCase , *_lowerCAmelCase ), None except Exception as e: return None, e __lowerCAmelCase = ( _set("key_a", "val_a"), _set("key_b", "val_b"), ) __lowerCAmelCase = [ _set("key_a", "val_a"), _set("key_a", "val_b"), ] __lowerCAmelCase = [ _set("key_a", "val_a"), _set("key_b", "val_b"), _del("key_a"), _del("key_b"), _set("key_a", "val_a"), _del("key_a"), ] __lowerCAmelCase = [ _get("key_a"), _del("key_a"), _set("key_a", "val_a"), _del("key_a"), _del("key_a"), _get("key_a"), ] __lowerCAmelCase = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] __lowerCAmelCase = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("key_a", "val_b"), ] @pytest.mark.parametrize( "operations" , ( pytest.param(_add_items , id="add items" ), pytest.param(_overwrite_items , id="overwrite items" ), pytest.param(_delete_items , id="delete items" ), pytest.param(_access_absent_items , id="access absent items" ), pytest.param(_add_with_resize_up , id="add with resize up" ), pytest.param(_add_with_resize_down , id="add with resize down" ), ) , ) def __lowerCamelCase ( _lowerCAmelCase ) -> List[str]: _UpperCAmelCase = HashMap(initial_block_size=4 ) _UpperCAmelCase = {} for _, (fun, *args) in enumerate(_lowerCAmelCase ): _UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) _UpperCAmelCase , _UpperCAmelCase = _run_operation(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase ) assert my_res == py_res assert str(_lowerCAmelCase ) == str(_lowerCAmelCase ) assert set(_lowerCAmelCase ) == set(_lowerCAmelCase ) assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ) assert set(my.items() ) == set(py.items() ) def __lowerCamelCase ( ) -> List[Any]: def is_public(_lowerCAmelCase ) -> bool: return not name.startswith("_" ) _UpperCAmelCase = {name for name in dir({} ) if is_public(_lowerCAmelCase )} _UpperCAmelCase = {name for name in dir(HashMap() ) if is_public(_lowerCAmelCase )} assert dict_public_names > hash_public_names
684
1
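Editor's note: the naive pattern search in this record is hard to follow with the placeholder variable names. A sketch of the same O(n·m) scan with descriptive names of my own choosing; the behavior is unchanged:

def naive_pattern_search(text: str, pattern: str) -> list:
    """Return every index at which `pattern` occurs in `text`."""
    pat_len = len(pattern)
    positions = []
    for i in range(len(text) - pat_len + 1):
        # Compare the window text[i : i + pat_len] character by character.
        if all(text[i + j] == pattern[j] for j in range(pat_len)):
            positions.append(i)
    return positions

assert naive_pattern_search("ABCDEFG", "DE") == [3]
assert naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC") == [4, 10, 18]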
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json", # See all DPT models at https://huggingface.co/models?filter=dpt } class __SCREAMING_SNAKE_CASE ( lowercase): __SCREAMING_SNAKE_CASE : List[str] = """dpt""" def __init__( self : List[str] , __UpperCamelCase : Union[str, Any]=768 , __UpperCamelCase : Union[str, Any]=12 , __UpperCamelCase : Optional[int]=12 , __UpperCamelCase : List[str]=3_072 , __UpperCamelCase : int="gelu" , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : Any=0.0 , __UpperCamelCase : List[Any]=0.02 , __UpperCamelCase : str=1e-1_2 , __UpperCamelCase : Dict=384 , __UpperCamelCase : Tuple=16 , __UpperCamelCase : Any=3 , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Tuple=True , __UpperCamelCase : List[Any]=[2, 5, 8, 11] , __UpperCamelCase : Dict="project" , __UpperCamelCase : int=[4, 2, 1, 0.5] , __UpperCamelCase : str=[96, 192, 384, 768] , __UpperCamelCase : Dict=256 , __UpperCamelCase : int=-1 , __UpperCamelCase : str=False , __UpperCamelCase : Any=True , __UpperCamelCase : Any=0.4 , __UpperCamelCase : Dict=255 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : List[str]=[1, 1_024, 24, 24] , __UpperCamelCase : Tuple=[0, 1] , __UpperCamelCase : List[Any]=None , **__UpperCamelCase : Tuple , ): super().__init__(**__UpperCamelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info("Initializing the config with a `BiT` backbone." ) _UpperCAmelCase = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, } _UpperCAmelCase = BitConfig(**__UpperCamelCase ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): logger.info("Initializing the config with a `BiT` backbone." ) _UpperCAmelCase = BitConfig(**__UpperCamelCase ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): _UpperCAmelCase = backbone_config else: raise ValueError( F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' ) _UpperCAmelCase = backbone_featmap_shape _UpperCAmelCase = neck_ignore_stages if readout_type != "project": raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." 
) else: _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = [] _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = image_size _UpperCAmelCase = patch_size _UpperCAmelCase = num_channels _UpperCAmelCase = qkv_bias _UpperCAmelCase = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" ) _UpperCAmelCase = readout_type _UpperCAmelCase = reassemble_factors _UpperCAmelCase = neck_hidden_sizes _UpperCAmelCase = fusion_hidden_size _UpperCAmelCase = head_in_index _UpperCAmelCase = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) _UpperCAmelCase = use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = semantic_loss_ignore_index _UpperCAmelCase = semantic_classifier_dropout def UpperCAmelCase__ ( self : Any ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: _UpperCAmelCase = self.backbone_config.to_dict() _UpperCAmelCase = self.__class__.model_type return output
684
def __lowerCamelCase ( _lowerCAmelCase ) -> list: _UpperCAmelCase = len(_lowerCAmelCase ) for i in range(1 , _lowerCAmelCase ): _UpperCAmelCase = collection[i] _UpperCAmelCase = 0 _UpperCAmelCase = i - 1 while low <= high: _UpperCAmelCase = (low + high) // 2 if val < collection[mid]: _UpperCAmelCase = mid - 1 else: _UpperCAmelCase = mid + 1 for j in range(_lowerCAmelCase , _lowerCAmelCase , -1 ): _UpperCAmelCase = collection[j - 1] _UpperCAmelCase = val return collection if __name__ == "__main__": __lowerCAmelCase = input("Enter numbers separated by a comma:\n").strip() __lowerCAmelCase = [int(item) for item in user_input.split(",")] print(binary_insertion_sort(unsorted))
684
1
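Editor's note: the sort in this record binary-searches the insertion point instead of scanning linearly, which cuts comparisons to O(log i) per element while the shifts stay O(i). A de-obfuscated sketch, with names chosen by me:

def binary_insertion_sort(collection: list) -> list:
    """In-place insertion sort with a binary search for the insertion index."""
    for i in range(1, len(collection)):
        val = collection[i]
        low, high = 0, i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # `low` is now the insertion index; shift the tail one slot right.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection

assert binary_insertion_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]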
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore __lowerCAmelCase = "\nHuman: <<task>>\n\nAssistant: " __lowerCAmelCase = "huggingface-tools/default-prompts" __lowerCAmelCase = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"} def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="run" ) -> Union[str, Any]: if prompt_or_repo_id is None: _UpperCAmelCase = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("\\s" , _lowerCAmelCase ) is not None: return prompt_or_repo_id _UpperCAmelCase = cached_file( _lowerCAmelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} ) with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f: return f.read()
684
__lowerCAmelCase = 2_5_6 # Modulus to hash a string __lowerCAmelCase = 1_0_0_0_0_0_3 def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> bool: _UpperCAmelCase = len(_lowerCAmelCase ) _UpperCAmelCase = len(_lowerCAmelCase ) if p_len > t_len: return False _UpperCAmelCase = 0 _UpperCAmelCase = 0 _UpperCAmelCase = 1 # Calculating the hash of pattern and substring of text for i in range(_lowerCAmelCase ): _UpperCAmelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus _UpperCAmelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue _UpperCAmelCase = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash _UpperCAmelCase = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def __lowerCamelCase ( ) -> None: _UpperCAmelCase = "abc1abc12" _UpperCAmelCase = "alskfjaldsabc1abc1abc12k23adsfabcabc" _UpperCAmelCase = "alskfjaldsk23adsfabcabc" assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase ) and not rabin_karp(_lowerCAmelCase , _lowerCAmelCase ) # Test 2) _UpperCAmelCase = "ABABX" _UpperCAmelCase = "ABABZABABYABABX" assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase ) # Test 3) _UpperCAmelCase = "AAAB" _UpperCAmelCase = "ABAAAAAB" assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase ) # Test 4) _UpperCAmelCase = "abcdabcy" _UpperCAmelCase = "abcxabcdabxabcdabcdabcy" assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase ) # Test 5) _UpperCAmelCase = "Lü" _UpperCAmelCase = "Lüsai" assert rabin_karp(_lowerCAmelCase , _lowerCAmelCase ) _UpperCAmelCase = "Lue" assert not rabin_karp(_lowerCAmelCase , _lowerCAmelCase ) print("Success." ) if __name__ == "__main__": test_rabin_karp()
684
1
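Editor's note: the Rabin–Karp search in this record keeps a rolling hash of the current text window so each slide costs O(1); hash collisions are resolved by a direct string comparison. A readable sketch using the same base (256) and modulus (1_000_003), with names chosen by me:

ALPHABET_SIZE = 256
MODULUS = 1_000_003

def rabin_karp(pattern: str, text: str) -> bool:
    p_len, t_len = len(pattern), len(text)
    if p_len > t_len:
        return False
    p_hash = t_hash = 0
    power = 1  # becomes ALPHABET_SIZE ** (p_len - 1) % MODULUS
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * ALPHABET_SIZE) % MODULUS
        t_hash = (ord(text[i]) + t_hash * ALPHABET_SIZE) % MODULUS
        if i != p_len - 1:
            power = (power * ALPHABET_SIZE) % MODULUS
    for i in range(t_len - p_len + 1):
        if t_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i != t_len - p_len:
            # Slide the window: drop text[i], append text[i + p_len].
            t_hash = ((t_hash - ord(text[i]) * power) * ALPHABET_SIZE + ord(text[i + p_len])) % MODULUS
    return False

assert rabin_karp("abc1abc12", "alskfjaldsabc1abc1abc12k23adsfabcabc")
assert not rabin_karp("abc1abc12", "alskfjaldsk23adsfabcabc")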
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE : def __init__( self : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str=13 , __UpperCamelCase : Tuple=7 , __UpperCamelCase : str=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Any=True , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : Dict=False , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : Dict=2 , __UpperCamelCase : Any=99 , __UpperCamelCase : List[str]=0 , __UpperCamelCase : Any=32 , __UpperCamelCase : List[str]=5 , __UpperCamelCase : Union[str, Any]=4 , __UpperCamelCase : str=0.1 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Union[str, Any]=512 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Optional[Any]=0.02 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : str=4 , __UpperCamelCase : Tuple="last" , __UpperCamelCase : int=True , __UpperCamelCase : List[Any]=None , __UpperCamelCase : str=0 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_lengths _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = gelu_activation _UpperCAmelCase = sinusoidal_embeddings _UpperCAmelCase = causal _UpperCAmelCase = asm _UpperCAmelCase = n_langs _UpperCAmelCase = vocab_size _UpperCAmelCase = n_special _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = summary_type _UpperCAmelCase = use_proj _UpperCAmelCase = scope _UpperCAmelCase = bos_token_id def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None if self.use_input_lengths: _UpperCAmelCase = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length _UpperCAmelCase = None if self.use_token_type_ids: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , 2 ).float() _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() 
return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase__ ( self : List[str] ): return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def UpperCAmelCase__ ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Dict , __UpperCamelCase : int , ): _UpperCAmelCase = XLMModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , lengths=__UpperCamelCase , langs=__UpperCamelCase ) _UpperCAmelCase = model(__UpperCamelCase , langs=__UpperCamelCase ) _UpperCAmelCase = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : int , ): _UpperCAmelCase = XLMWithLMHeadModel(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Tuple , ): _UpperCAmelCase = XLMForQuestionAnsweringSimple(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = model(__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase ) _UpperCAmelCase = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : str , __UpperCamelCase : Any , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : Tuple , ): _UpperCAmelCase = XLMForQuestionAnswering(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = model( __UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , cls_index=__UpperCamelCase , is_impossible=__UpperCamelCase , 
p_mask=__UpperCamelCase , ) _UpperCAmelCase = model( __UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , cls_index=__UpperCamelCase , is_impossible=__UpperCamelCase , ) ((_UpperCAmelCase) , ) = result_with_labels.to_tuple() _UpperCAmelCase = model(__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase ) ((_UpperCAmelCase) , ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCAmelCase__ ( self : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : int , ): _UpperCAmelCase = XLMForSequenceClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase ) _UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase__ ( self : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , ): _UpperCAmelCase = self.num_labels _UpperCAmelCase = XLMForTokenClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , ): _UpperCAmelCase = self.num_choices _UpperCAmelCase = XLMForMultipleChoice(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( __UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = self.prepare_config_and_inputs() ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , 
( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) = config_and_inputs _UpperCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : int = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE : Tuple = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable __SCREAMING_SNAKE_CASE : List[str] = ( { """feature-extraction""": XLMModel, """fill-mask""": XLMWithLMHeadModel, """question-answering""": XLMForQuestionAnsweringSimple, """text-classification""": XLMForSequenceClassification, """text-generation""": XLMWithLMHeadModel, """token-classification""": XLMForTokenClassification, """zero-shot""": XLMForSequenceClassification, } if is_torch_available() else {} ) def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Any ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int=False ): _UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCamelCase ) _UpperCAmelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCamelCase ) return inputs_dict def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = XLMModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , emb_dim=37 ) def UpperCAmelCase__ ( self : List[Any] ): self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*__UpperCamelCase ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*__UpperCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*__UpperCamelCase ) def UpperCAmelCase__ ( self : Tuple ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*__UpperCamelCase ) def UpperCAmelCase__ ( self : Dict ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*__UpperCamelCase ) def UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*__UpperCamelCase ) def 
UpperCAmelCase__ ( self : List[str] ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*__UpperCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : int=1 ): self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) self.assertListEqual( [isinstance(__UpperCamelCase , __UpperCamelCase ) for iter_attentions in attentions] , [True] * len(__UpperCamelCase ) ) self.assertEqual(len(__UpperCamelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(__UpperCamelCase ): # adds PAD dummy token _UpperCAmelCase = min_length + idx + 1 _UpperCAmelCase = min_length + idx + 1 _UpperCAmelCase = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__UpperCamelCase ) ) def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : List[Any]=False , __UpperCamelCase : List[str]=1 ): self.assertIsInstance(__UpperCamelCase , __UpperCamelCase ) self.assertListEqual( [isinstance(__UpperCamelCase , __UpperCamelCase ) for iter_hidden_states in hidden_states] , [True] * len(__UpperCamelCase ) , ) self.assertEqual(len(__UpperCamelCase ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(__UpperCamelCase ): # adds PAD dummy token _UpperCAmelCase = min_length + idx + 1 _UpperCAmelCase = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__UpperCamelCase ) , ) pass @slow def UpperCAmelCase__ ( self : Dict ): for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = XLMModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase): @slow def UpperCAmelCase__ ( self : Optional[int] ): _UpperCAmelCase = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" ) model.to(__UpperCamelCase ) _UpperCAmelCase = torch.tensor([[14, 447]] , dtype=torch.long , device=__UpperCamelCase ) # the president _UpperCAmelCase = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference _UpperCAmelCase = model.generate(__UpperCamelCase , do_sample=__UpperCamelCase ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __UpperCamelCase )
684
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch __lowerCAmelCase = random.Random() def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> List[str]: if rng is None: _UpperCAmelCase = global_rng _UpperCAmelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def __init__( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=7 , __UpperCamelCase : Union[str, Any]=400 , __UpperCamelCase : List[Any]=2_000 , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : Optional[int]=160 , __UpperCamelCase : Any=8 , __UpperCamelCase : List[Any]=0.0 , __UpperCamelCase : Dict=4_000 , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Tuple=True , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = min_seq_length _UpperCAmelCase = max_seq_length _UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _UpperCAmelCase = padding_value _UpperCAmelCase = sampling_rate _UpperCAmelCase = return_attention_mask _UpperCAmelCase = do_normalize _UpperCAmelCase = feature_size _UpperCAmelCase = chunk_length _UpperCAmelCase = hop_length def UpperCAmelCase__ ( self : Optional[Any] ): return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Tuple=False , __UpperCamelCase : Dict=False ): def _flatten(__UpperCamelCase : Any ): return list(itertools.chain(*__UpperCamelCase ) ) if equal_length: _UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _UpperCAmelCase = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __SCREAMING_SNAKE_CASE ( lowercase , unittest.TestCase): __SCREAMING_SNAKE_CASE : str = WhisperFeatureExtractor if is_speech_available() else None def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = WhisperFeatureExtractionTester(self ) def UpperCAmelCase__ ( self : str ): _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = feat_extract_first.save_pretrained(__UpperCamelCase )[0] check_json_file_has_correct_format(__UpperCamelCase ) _UpperCAmelCase = self.feature_extraction_class.from_pretrained(__UpperCamelCase ) _UpperCAmelCase = feat_extract_first.to_dict() _UpperCAmelCase = feat_extract_second.to_dict() _UpperCAmelCase = 
feat_extract_first.mel_filters _UpperCAmelCase = feat_extract_second.mel_filters self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(__UpperCamelCase , "feat_extract.json" ) feat_extract_first.to_json_file(__UpperCamelCase ) _UpperCAmelCase = self.feature_extraction_class.from_json_file(__UpperCamelCase ) _UpperCAmelCase = feat_extract_first.to_dict() _UpperCAmelCase = feat_extract_second.to_dict() _UpperCAmelCase = feat_extract_first.mel_filters _UpperCAmelCase = feat_extract_second.mel_filters self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase ) ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def UpperCAmelCase__ ( self : int ): # Tests that all call wrap to encode_plus and batch_encode_plus _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs] # Test feature size _UpperCAmelCase = feature_extractor(__UpperCamelCase , padding="max_length" , return_tensors="np" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input _UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) # Test batched _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
_UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)] _UpperCAmelCase = np.asarray(__UpperCamelCase ) _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) # Test truncation required _UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs] _UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs] _UpperCAmelCase = [np.asarray(__UpperCamelCase ) for speech_input in speech_inputs_truncated] _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="np" ).input_features for enc_seq_a, enc_seq_a in zip(__UpperCamelCase , __UpperCamelCase ): self.assertTrue(np.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) ) def UpperCAmelCase__ ( self : Union[str, Any] ): import torch _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa ) _UpperCAmelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) _UpperCAmelCase = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Tuple ): _UpperCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech _UpperCAmelCase = ds.sort("id" ).select(range(__UpperCamelCase ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def UpperCAmelCase__ ( self : Tuple ): # fmt: off _UpperCAmelCase = torch.tensor( [ 0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951, 0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678, 0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554, -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854 ] ) # fmt: on _UpperCAmelCase = self._load_datasamples(1 ) _UpperCAmelCase = WhisperFeatureExtractor() _UpperCAmelCase = feature_extractor(__UpperCamelCase , return_tensors="pt" ).input_features self.assertEqual(input_features.shape , (1, 80, 3_000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , __UpperCamelCase , atol=1e-4 ) ) def UpperCAmelCase__ ( self : Optional[Any] ): _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _UpperCAmelCase = self._load_datasamples(1 )[0] _UpperCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue _UpperCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=__UpperCamelCase )[0] self.assertTrue(np.all(np.mean(__UpperCamelCase ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(__UpperCamelCase ) - 1 ) < 1e-3 ) )
684
1
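Editor's note: the Whisper feature-extractor record above checks, among other things, that serialization round-trips. A minimal sketch of that round-trip with the public API; the mel filter bank is a float array, so it is compared numerically rather than by equality:

import tempfile

import numpy as np
from transformers import WhisperFeatureExtractor

extractor = WhisperFeatureExtractor()
with tempfile.TemporaryDirectory() as tmp_dir:
    extractor.save_pretrained(tmp_dir)  # writes preprocessor_config.json
    reloaded = WhisperFeatureExtractor.from_pretrained(tmp_dir)

# The reloaded extractor must reproduce the same mel filters.
assert np.allclose(extractor.mel_filters, reloaded.mel_filters)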
import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def __lowerCamelCase ( _lowerCAmelCase ) -> Any: _UpperCAmelCase = [ "decoder.version", "decoder.output_projection.weight", "_float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(_lowerCAmelCase , _lowerCAmelCase ) def __lowerCamelCase ( _lowerCAmelCase ) -> Tuple: _UpperCAmelCase , _UpperCAmelCase = emb.weight.shape _UpperCAmelCase = nn.Linear(_lowerCAmelCase , _lowerCAmelCase , bias=_lowerCAmelCase ) _UpperCAmelCase = emb.weight.data return lin_layer def __lowerCamelCase ( _lowerCAmelCase ) -> int: _UpperCAmelCase = torch.load(_lowerCAmelCase , map_location="cpu" ) _UpperCAmelCase = Namespace(**checkpoint["cfg"]["model"] ) _UpperCAmelCase = checkpoint["model"] remove_ignore_keys_(_lowerCAmelCase ) _UpperCAmelCase = state_dict["decoder.embed_tokens.weight"].shape[0] _UpperCAmelCase = {key.replace("decoder" , "model" ): val for key, val in state_dict.items()} _UpperCAmelCase = XGLMConfig( vocab_size=_lowerCAmelCase , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="gelu" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , ) _UpperCAmelCase = XGLMForCausalLM(_lowerCAmelCase ) _UpperCAmelCase = model.load_state_dict(_lowerCAmelCase , strict=_lowerCAmelCase ) print(_lowerCAmelCase ) _UpperCAmelCase = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.") parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
684
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore __lowerCAmelCase = "\nHuman: <<task>>\n\nAssistant: " __lowerCAmelCase = "huggingface-tools/default-prompts" __lowerCAmelCase = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"} def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="run" ) -> Union[str, Any]: if prompt_or_repo_id is None: _UpperCAmelCase = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("\\s" , _lowerCAmelCase ) is not None: return prompt_or_repo_id _UpperCAmelCase = cached_file( _lowerCAmelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} ) with open(_lowerCAmelCase , "r" , encoding="utf-8" ) as f: return f.read()
684
1
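Editor's note: the XGLM conversion record above builds the LM head by tying it to the token-embedding matrix. A de-obfuscated sketch of that helper; the dimensions below are illustrative:

import torch
from torch import nn

def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    """Output projection sharing its weights with the input embedding."""
    vocab_size, emb_dim = emb.weight.shape
    lin_layer = nn.Linear(emb_dim, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data  # tie: same underlying tensor
    return lin_layer

emb = nn.Embedding(100, 16)        # 100-token vocabulary, 16-dim embeddings
proj = make_linear_from_emb(emb)
logits = proj(torch.randn(2, 16))  # one score per vocabulary entry
assert logits.shape == (2, 100)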
import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline __lowerCAmelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , ) -> Optional[int]: output_path.parent.mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( _lowerCAmelCase , _lowerCAmelCase , f=output_path.as_posix() , input_names=_lowerCAmelCase , output_names=_lowerCAmelCase , dynamic_axes=_lowerCAmelCase , do_constant_folding=_lowerCAmelCase , use_external_data_format=_lowerCAmelCase , enable_onnx_checker=_lowerCAmelCase , opset_version=_lowerCAmelCase , ) else: export( _lowerCAmelCase , _lowerCAmelCase , f=output_path.as_posix() , input_names=_lowerCAmelCase , output_names=_lowerCAmelCase , dynamic_axes=_lowerCAmelCase , do_constant_folding=_lowerCAmelCase , opset_version=_lowerCAmelCase , ) @torch.no_grad() def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = False ) -> Optional[Any]: _UpperCAmelCase = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): _UpperCAmelCase = "cuda" elif fpaa and not torch.cuda.is_available(): raise ValueError("`float16` model export is only supported on GPUs with CUDA" ) else: _UpperCAmelCase = "cpu" _UpperCAmelCase = StableDiffusionPipeline.from_pretrained(_lowerCAmelCase , torch_dtype=_lowerCAmelCase ).to(_lowerCAmelCase ) _UpperCAmelCase = Path(_lowerCAmelCase ) # TEXT ENCODER _UpperCAmelCase = pipeline.text_encoder.config.max_position_embeddings _UpperCAmelCase = pipeline.text_encoder.config.hidden_size _UpperCAmelCase = pipeline.tokenizer( "A sample prompt" , padding="max_length" , max_length=pipeline.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors="pt" , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=_lowerCAmelCase , dtype=torch.intaa )) , output_path=output_path / "text_encoder" / "model.onnx" , ordered_input_names=["input_ids"] , output_names=["last_hidden_state", "pooler_output"] , dynamic_axes={ "input_ids": {0: "batch", 1: "sequence"}, } , opset=_lowerCAmelCase , ) del pipeline.text_encoder # UNET _UpperCAmelCase = pipeline.unet.config.in_channels _UpperCAmelCase = pipeline.unet.config.sample_size _UpperCAmelCase = output_path / "unet" / "model.onnx" onnx_export( pipeline.unet , model_args=( torch.randn(2 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase ), torch.randn(2 ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase ), torch.randn(2 , _lowerCAmelCase , _lowerCAmelCase ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase ), False, ) , output_path=_lowerCAmelCase , ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"] , output_names=["out_sample"] , dynamic_axes={ "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, "timestep": {0: "batch"}, "encoder_hidden_states": {0: "batch", 1: "sequence"}, } , opset=_lowerCAmelCase , use_external_data_format=_lowerCAmelCase , ) 
_UpperCAmelCase = str(unet_path.absolute().as_posix() ) _UpperCAmelCase = os.path.dirname(_lowerCAmelCase ) _UpperCAmelCase = onnx.load(_lowerCAmelCase ) # clean up existing tensor files shutil.rmtree(_lowerCAmelCase ) os.mkdir(_lowerCAmelCase ) # collate external tensor files into one onnx.save_model( _lowerCAmelCase , _lowerCAmelCase , save_as_external_data=_lowerCAmelCase , all_tensors_to_one_file=_lowerCAmelCase , location="weights.pb" , convert_attribute=_lowerCAmelCase , ) del pipeline.unet # VAE ENCODER _UpperCAmelCase = pipeline.vae _UpperCAmelCase = vae_encoder.config.in_channels _UpperCAmelCase = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder _UpperCAmelCase = lambda _lowerCAmelCase , _lowerCAmelCase : vae_encoder.encode(_lowerCAmelCase , _lowerCAmelCase )[0].sample() onnx_export( _lowerCAmelCase , model_args=( torch.randn(1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase ), False, ) , output_path=output_path / "vae_encoder" / "model.onnx" , ordered_input_names=["sample", "return_dict"] , output_names=["latent_sample"] , dynamic_axes={ "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, } , opset=_lowerCAmelCase , ) # VAE DECODER _UpperCAmelCase = pipeline.vae _UpperCAmelCase = vae_decoder.config.latent_channels _UpperCAmelCase = vae_decoder.config.out_channels # forward only through the decoder part _UpperCAmelCase = vae_decoder.decode onnx_export( _lowerCAmelCase , model_args=( torch.randn(1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase ), False, ) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={ "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, } , opset=_lowerCAmelCase , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: _UpperCAmelCase = pipeline.safety_checker _UpperCAmelCase = safety_checker.config.vision_config.num_channels _UpperCAmelCase = safety_checker.config.vision_config.image_size _UpperCAmelCase = safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase ), torch.randn(1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).to(device=_lowerCAmelCase , dtype=_lowerCAmelCase ), ) , output_path=output_path / "safety_checker" / "model.onnx" , ordered_input_names=["clip_input", "images"] , output_names=["out_images", "has_nsfw_concepts"] , dynamic_axes={ "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"}, "images": {0: "batch", 1: "height", 2: "width", 3: "channels"}, } , opset=_lowerCAmelCase , ) del pipeline.safety_checker _UpperCAmelCase = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker" ) _UpperCAmelCase = pipeline.feature_extractor else: _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / "unet" ) , scheduler=pipeline.scheduler , safety_checker=_lowerCAmelCase , feature_extractor=_lowerCAmelCase ,
requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(_lowerCAmelCase ) print("ONNX pipeline saved to" , _lowerCAmelCase ) del pipeline del onnx_pipeline _UpperCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(_lowerCAmelCase , provider="CPUExecutionProvider" ) print("ONNX pipeline is loadable" ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( "--model_path", type=str, required=True, help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).", ) parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.") parser.add_argument( "--opset", default=1_4, type=int, help="The version of the ONNX operator set to use.", ) parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode") __lowerCAmelCase = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
684
from itertools import permutations def __lowerCamelCase ( _lowerCAmelCase ) -> bool: if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False _UpperCAmelCase = [7, 11, 13, 17] for i, test in enumerate(_lowerCAmelCase ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def __lowerCamelCase ( _lowerCAmelCase = 10 ) -> int: return sum( int("".join(map(_lowerCAmelCase , _lowerCAmelCase ) ) ) for num in permutations(range(_lowerCAmelCase ) ) if is_substring_divisible(_lowerCAmelCase ) ) if __name__ == "__main__": print(F'''{solution() = }''')
684
1
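Editor's note: the last record solves Project Euler 43 (0-9 pandigital numbers whose 3-digit substrings satisfy successive prime divisibility). The record special-cases the tests for 2, 3, and 5; an equivalent sketch treats all seven primes uniformly, with names chosen by me:

from itertools import permutations

PRIMES = (2, 3, 5, 7, 11, 13, 17)

def is_substring_divisible(num: tuple) -> bool:
    """d2d3d4 % 2 == 0, d3d4d5 % 3 == 0, ..., d8d9d10 % 17 == 0."""
    return all(
        (num[i + 1] * 100 + num[i + 2] * 10 + num[i + 3]) % p == 0
        for i, p in enumerate(PRIMES)
    )

def solution() -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(10))
        if is_substring_divisible(num)
    )

print(solution())  # the documented answer to Problem 43 is 16695334890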