code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
class lowercase_ :
def __init__( self , lowercase_ , lowercase_=None , lowercase_=None) -> Tuple:
a__ =data
a__ =previous
a__ =next_node
def __str__( self) -> str:
return F"""{self.data}"""
def __UpperCamelCase ( self) -> int:
return self.data
def __UpperCamelCase ( self) -> Any:
return self.next
def __UpperCamelCase ( self) -> int:
return self.previous
class lowercase_ :
def __init__( self , lowercase_) -> str:
a__ =head
def __iter__( self) -> Union[str, Any]:
return self
def __UpperCamelCase ( self) -> str:
if not self.current:
raise StopIteration
else:
a__ =self.current.get_data()
a__ =self.current.get_next()
return value
class lowercase_ :
def __init__( self) -> Union[str, Any]:
a__ =None # First node in list
a__ =None # Last node in list
def __str__( self) -> Dict:
a__ =self.head
a__ =[]
while current is not None:
nodes.append(current.get_data())
a__ =current.get_next()
return " ".join(str(lowercase_) for node in nodes)
def __contains__( self , lowercase_) -> Any:
a__ =self.head
while current:
if current.get_data() == value:
return True
a__ =current.get_next()
return False
def __iter__( self) -> Any:
return LinkedListIterator(self.head)
def __UpperCamelCase ( self) -> List[str]:
if self.head:
return self.head.get_data()
return None
def __UpperCamelCase ( self) -> Optional[int]:
if self.tail:
return self.tail.get_data()
return None
def __UpperCamelCase ( self , lowercase_) -> None:
if self.head is None:
a__ =node
a__ =node
else:
self.insert_before_node(self.head , lowercase_)
def __UpperCamelCase ( self , lowercase_) -> None:
if self.head is None:
self.set_head(lowercase_)
else:
self.insert_after_node(self.tail , lowercase_)
def __UpperCamelCase ( self , lowercase_) -> None:
a__ =Node(lowercase_)
if self.head is None:
self.set_head(lowercase_)
else:
self.set_tail(lowercase_)
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> None:
a__ =node
a__ =node.previous
if node.get_previous() is None:
a__ =node_to_insert
else:
a__ =node_to_insert
a__ =node_to_insert
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> None:
a__ =node
a__ =node.next
if node.get_next() is None:
a__ =node_to_insert
else:
a__ =node_to_insert
a__ =node_to_insert
def __UpperCamelCase ( self , lowercase_ , lowercase_) -> None:
a__ =1
a__ =Node(lowercase_)
a__ =self.head
while node:
if current_position == position:
self.insert_before_node(lowercase_ , lowercase_)
return
current_position += 1
a__ =node.next
self.insert_after_node(self.tail , lowercase_)
def __UpperCamelCase ( self , lowercase_) -> Node:
a__ =self.head
while node:
if node.get_data() == item:
return node
a__ =node.get_next()
raise Exception('Node not found')
def __UpperCamelCase ( self , lowercase_) -> Dict:
if (node := self.get_node(lowercase_)) is not None:
if node == self.head:
a__ =self.head.get_next()
if node == self.tail:
a__ =self.tail.get_previous()
self.remove_node_pointers(lowercase_)
@staticmethod
def __UpperCamelCase ( lowercase_) -> None:
if node.get_next():
a__ =node.previous
if node.get_previous():
a__ =node.next
a__ =None
a__ =None
def __UpperCamelCase ( self) -> Any:
return self.head is None
def _lowercase( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
import os
_lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : List[str] = 0
lowerCAmelCase_ : Any = 0
while index < len(snake_case__) - 1:
lowerCAmelCase_ : Optional[Any] = SYMBOLS[numerals[index]]
lowerCAmelCase_ : int = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Optional[int] = ""
lowerCAmelCase_ : Tuple = num // 10_00
numerals += m_count * "M"
num %= 10_00
lowerCAmelCase_ : int = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
lowerCAmelCase_ : int = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def UpperCamelCase ( snake_case__ = "/p089_roman.txt"):
lowerCAmelCase_ : int = 0
with open(os.path.dirname(snake_case__) + roman_numerals_filename) as filea:
lowerCAmelCase_ : List[Any] = filea.readlines()
for line in lines:
lowerCAmelCase_ : Any = line.strip()
lowerCAmelCase_ : Tuple = parse_roman_numerals(snake_case__)
lowerCAmelCase_ : List[Any] = generate_roman_numerals(snake_case__)
savings += len(snake_case__) - len(snake_case__)
return savings
if __name__ == "__main__":
print(f"{solution() = }")
| 659 | 0 |
def lowerCAmelCase_ ( lowerCamelCase = 1000 ):
__magic_name__ : Optional[int] =-1
__magic_name__ : Tuple =0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
__magic_name__ : str =(n * n - 2 * a * n) // (2 * n - 2 * a)
__magic_name__ : Tuple =n - a - b
if c * c == (a * a + b * b):
__magic_name__ : Any =a * b * c
if candidate >= product:
__magic_name__ : Optional[int] =candidate
return product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 21 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCamelCase ( ):
lowerCAmelCase_ : Dict = HfArgumentParser(snake_case__)
lowerCAmelCase_ : Dict = parser.parse_args_into_dataclasses()[0]
lowerCAmelCase_ : List[Any] = TensorFlowBenchmark(args=snake_case__)
try:
lowerCAmelCase_ : str = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowerCAmelCase_ : Optional[Any] = "Arg --no_{0} is no longer used, please use --no-{0} instead."
lowerCAmelCase_ : Tuple = " ".join(str(snake_case__).split(" ")[:-1])
lowerCAmelCase_ : List[Any] = ""
lowerCAmelCase_ : Optional[Any] = eval(str(snake_case__).split(" ")[-1])
lowerCAmelCase_ : List[Any] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:])
else:
wrong_args.append(snake_case__)
if len(snake_case__) > 0:
lowerCAmelCase_ : int = full_error_msg + begin_error_msg + str(snake_case__)
raise ValueError(snake_case__)
benchmark.run()
if __name__ == "__main__":
main()
| 659 | 0 |
'''simple docstring'''
import copy
import re
class A :
lowercase_ = 'hp'
lowercase_ = {}
lowercase_ = None
@classmethod
def __lowerCAmelCase ( cls : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] ) -> Any:
"""simple docstring"""
_a = prefix
_a = defaults
cls.build_naming_info()
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
if len(lowerCAmelCase_ ) == 0:
return ""
_a = None
if any(char.isdigit() for char in word ):
raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(lowerCAmelCase_ ) + 1 ):
_a = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
_a = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase_ : Optional[int] ):
_a = ''''''
while integer != 0:
_a = chr(ord('''A''' ) + integer % 10 ) + s
integer //= 10
return s
_a = 0
while True:
_a = word + '''#''' + int_to_alphabetic(lowerCAmelCase_ )
if sword in info["reverse_short_word"]:
continue
else:
_a = sword
break
_a = short_word
_a = word
return short_word
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] ) -> int:
"""simple docstring"""
_a = param_name.split('''_''' )
_a = [TrialShortNamer.shortname_for_word(lowerCAmelCase_ , lowerCAmelCase_ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
_a = ['''''', '''_''']
for separator in separators:
_a = separator.join(lowerCAmelCase_ )
if shortname not in info["reverse_short_param"]:
_a = shortname
_a = param_name
return shortname
return param_name
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : int , lowerCAmelCase_ : Any ) -> Any:
"""simple docstring"""
_a = TrialShortNamer.shortname_for_key(lowerCAmelCase_ , lowerCAmelCase_ )
_a = short_name
_a = param_name
@classmethod
def __lowerCAmelCase ( cls : Dict ) -> Optional[int]:
"""simple docstring"""
if cls.NAMING_INFO is not None:
return
_a = {
'''short_word''': {},
'''reverse_short_word''': {},
'''short_param''': {},
'''reverse_short_param''': {},
}
_a = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase_ , lowerCAmelCase_ )
_a = info
@classmethod
def __lowerCAmelCase ( cls : int , lowerCAmelCase_ : Dict ) -> Optional[Any]:
"""simple docstring"""
cls.build_naming_info()
assert cls.PREFIX is not None
_a = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
_a = cls.NAMING_INFO['''short_param'''][k]
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_a = 1 if v else 0
_a = '''''' if isinstance(lowerCAmelCase_ , (int, float) ) else '''-'''
_a = F'{key}{sep}{v}'
name.append(lowerCAmelCase_ )
return "_".join(lowerCAmelCase_ )
@classmethod
def __lowerCAmelCase ( cls : List[Any] , lowerCAmelCase_ : Any ) -> Optional[int]:
"""simple docstring"""
_a = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
_a = []
else:
_a = repr.split('''_''' )
_a = {}
for value in values:
if "-" in value:
_a , _a = value.split('''-''' )
else:
_a = re.sub('''[0-9.]''' , '''''' , lowerCAmelCase_ )
_a = float(re.sub('''[^0-9.]''' , '''''' , lowerCAmelCase_ ) )
_a = cls.NAMING_INFO['''reverse_short_param'''][p_k]
_a = p_v
for k in cls.DEFAULTS:
if k not in parameters:
_a = cls.DEFAULTS[k]
return parameters
| 22 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_lowercase = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
_lowercase = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def UpperCamelCase ( ):
lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , bootstrap_aggregation=snake_case__ , rouge_keys=["rouge2", "rougeL"])
assert isinstance(snake_case__ , snake_case__)
lowerCAmelCase_ : str = calculate_rouge(snake_case__ , snake_case__ , bootstrap_aggregation=snake_case__ , rouge_keys=["rouge2"])
assert (
pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["rouge2"]).fmeasure.mean()
)
def UpperCamelCase ( ):
lowerCAmelCase_ : str = "rougeLsum"
lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=[k])[k]
lowerCAmelCase_ : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=[k])[k]
assert score > score_no_sep
def UpperCamelCase ( ):
lowerCAmelCase_ : int = ["rouge1", "rouge2", "rougeL"]
lowerCAmelCase_ : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=snake_case__)
lowerCAmelCase_ : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=snake_case__)
assert score_sep == score_no_sep
def UpperCamelCase ( ):
lowerCAmelCase_ : List[str] = [
"Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
]
lowerCAmelCase_ : Dict = [
"Margot Frank, died in 1945, a month earlier than previously thought.",
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
" the final seconds on board Flight 9525.",
]
assert calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__) == calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__)
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[int] = [
"\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
]
lowerCAmelCase_ : Any = [
" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
]
lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , rouge_keys=["rougeLsum"] , newline_sep=snake_case__)["rougeLsum"]
lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , rouge_keys=["rougeLsum"])["rougeLsum"]
assert new_score > prev_score
def UpperCamelCase ( ):
lowerCAmelCase_ : int = Path("examples/seq2seq/test_data/wmt_en_ro")
lowerCAmelCase_ : Dict = calculate_rouge_path(data_dir.joinpath("test.source") , data_dir.joinpath("test.target"))
assert isinstance(snake_case__ , snake_case__)
lowerCAmelCase_ : Any = calculate_rouge_path(
data_dir.joinpath("test.source") , data_dir.joinpath("test.target") , bootstrap_aggregation=snake_case__)
assert isinstance(snake_case__ , snake_case__)
| 659 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class _a ( UpperCAmelCase__ ):
"""simple docstring"""
A_ = 42
A_ = 42
A_ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 23 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = LEDTokenizer
UpperCamelCase_ = LEDTokenizerFast
UpperCamelCase_ = True
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
super().setUp()
lowerCAmelCase_ : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowerCAmelCase_ : Tuple = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : int = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase_ : Union[str, Any] = {"unk_token": "<unk>"}
lowerCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : List[Any] ,**lowerCAmelCase__ : int ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ,**lowerCAmelCase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : int ) -> List[str]:
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
'''simple docstring'''
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
lowerCAmelCase_ : int = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase_ : Any = tokenizer(lowerCAmelCase__ ,max_length=len(lowerCAmelCase__ ) ,padding=lowerCAmelCase__ ,return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ ,lowerCAmelCase__ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
lowerCAmelCase_ : int = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
@require_torch
def UpperCAmelCase_ ( self : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,return_tensors="pt" )
self.assertIn("input_ids" ,lowerCAmelCase__ )
self.assertIn("attention_mask" ,lowerCAmelCase__ )
self.assertNotIn("labels" ,lowerCAmelCase__ )
self.assertNotIn("decoder_attention_mask" ,lowerCAmelCase__ )
@require_torch
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : int = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase_ : Optional[int] = tokenizer(text_target=lowerCAmelCase__ ,max_length=32 ,padding="max_length" ,return_tensors="pt" )
self.assertEqual(32 ,targets["input_ids"].shape[1] )
@require_torch
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase_ : Tuple = tokenizer(
["I am a small frog" * 10_24, "I am a small frog"] ,padding=lowerCAmelCase__ ,truncation=lowerCAmelCase__ ,return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase__ ,lowerCAmelCase__ )
self.assertEqual(batch.input_ids.shape ,(2, 51_22) )
@require_torch
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = ["A long paragraph for summarization."]
lowerCAmelCase_ : Dict = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ ,return_tensors="pt" )
lowerCAmelCase_ : Optional[Any] = tokenizer(text_target=lowerCAmelCase__ ,return_tensors="pt" )
lowerCAmelCase_ : List[str] = inputs["input_ids"]
lowerCAmelCase_ : Any = targets["input_ids"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def UpperCAmelCase_ ( self : str ) -> Tuple:
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowerCAmelCase_ : str = ["Summary of the text.", "Another summary."]
lowerCAmelCase_ : str = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowerCAmelCase_ : List[Any] = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = [[0] * len(lowerCAmelCase__ ) for x in encoded_output["input_ids"]]
lowerCAmelCase_ : Optional[int] = tokenizer.pad(lowerCAmelCase__ )
self.assertSequenceEqual(outputs["global_attention_mask"] ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = self.tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCAmelCase_ : Dict = "A, <mask> AllenNLP sentence."
lowerCAmelCase_ : Tuple = tokenizer_r.encode_plus(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,return_token_type_ids=lowerCAmelCase__ )
lowerCAmelCase_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,return_token_type_ids=lowerCAmelCase__ )
self.assertEqual(sum(tokens_r["token_type_ids"] ) ,sum(tokens_p["token_type_ids"] ) )
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) ,sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) ,)
lowerCAmelCase_ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
lowerCAmelCase_ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
self.assertSequenceEqual(tokens_p["input_ids"] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ ,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase__ ,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 659 | 0 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
UpperCAmelCase_ : Any = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def _UpperCamelCase (_lowerCamelCase : Any )-> str:
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
UpperCAmelCase_ : Tuple = parser.parse_args()
if args.check_lib:
UpperCAmelCase_ : Any = importlib.import_module('''transformers''')
UpperCAmelCase_ : Any = Path(transformers_module.__file__).parent
else:
UpperCAmelCase_ : List[Any] = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
| 24 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = 'van'
def __init__( self : List[str] ,lowerCAmelCase__ : int=2_24 ,lowerCAmelCase__ : Optional[int]=3 ,lowerCAmelCase__ : Dict=[7, 3, 3, 3] ,lowerCAmelCase__ : List[str]=[4, 2, 2, 2] ,lowerCAmelCase__ : Union[str, Any]=[64, 1_28, 3_20, 5_12] ,lowerCAmelCase__ : Union[str, Any]=[3, 3, 12, 3] ,lowerCAmelCase__ : Any=[8, 8, 4, 4] ,lowerCAmelCase__ : Optional[int]="gelu" ,lowerCAmelCase__ : List[str]=0.02 ,lowerCAmelCase__ : Optional[Any]=1e-6 ,lowerCAmelCase__ : Dict=1e-2 ,lowerCAmelCase__ : Union[str, Any]=0.0 ,lowerCAmelCase__ : Optional[Any]=0.0 ,**lowerCAmelCase__ : List[str] ,) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = image_size
lowerCAmelCase_ : List[str] = num_channels
lowerCAmelCase_ : str = patch_sizes
lowerCAmelCase_ : Optional[Any] = strides
lowerCAmelCase_ : List[Any] = hidden_sizes
lowerCAmelCase_ : int = depths
lowerCAmelCase_ : int = mlp_ratios
lowerCAmelCase_ : str = hidden_act
lowerCAmelCase_ : List[str] = initializer_range
lowerCAmelCase_ : Dict = layer_norm_eps
lowerCAmelCase_ : str = layer_scale_init_value
lowerCAmelCase_ : Tuple = drop_path_rate
lowerCAmelCase_ : Dict = dropout_rate
| 659 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
a_ = None
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a_ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
a_ = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
a_ = '▁'
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =BigBirdTokenizer
lowerCamelCase__ =['input_ids', 'attention_mask']
lowerCamelCase__ =[]
def __init__( self : str , a : int=None , a : Optional[int]=None , a : Union[str, Any]="<unk>" , a : int="<s>" , a : Any="</s>" , a : List[str]="<pad>" , a : List[Any]="[SEP]" , a : Optional[Any]="[MASK]" , a : Optional[int]="[CLS]" , **a : List[str] , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else bos_token
SCREAMING_SNAKE_CASE : Tuple = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else eos_token
SCREAMING_SNAKE_CASE : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else unk_token
SCREAMING_SNAKE_CASE : Optional[int] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else pad_token
SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else cls_token
SCREAMING_SNAKE_CASE : int = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE : Tuple = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , **a , )
SCREAMING_SNAKE_CASE : Tuple = vocab_file
SCREAMING_SNAKE_CASE : Tuple = False if not self.vocab_file else True
def __UpperCamelCase ( self : Dict , a : List[int] , a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCamelCase ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None , a : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
def __UpperCamelCase ( self : List[str] , a : List[int] , a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
    """Copy the slow-tokenizer vocabulary file into ``save_directory``.

    NOTE(review): the scrambled original bound the joined path to a throwaway
    name and then returned the undefined ``out_vocab_file`` (NameError); it also
    declared duplicate parameter names (SyntaxError). Names restored from the
    body's usages.

    :param save_directory: existing directory to write the vocab file into.
    :param filename_prefix: optional prefix prepended to the vocab file name.
    :raises ValueError: if the fast tokenizer cannot back a slow tokenizer.
    :return: one-tuple with the written vocab file path (or None after logging
        an error when ``save_directory`` is not a directory).
    """
    if not self.can_save_slow_tokenizer:
        raise ValueError(
            "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
            "tokenizer.")
    if not os.path.isdir(save_directory):
        logger.error(f"Vocabulary path ({save_directory}) should be a directory")
        return
    out_vocab_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
    # Only copy when the target differs from the tokenizer's own vocab file.
    if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
        copyfile(self.vocab_file, out_vocab_file)
    return (out_vocab_file,)
from math import factorial
def UpperCamelCase(n, k):
    """Return the binomial coefficient C(n, k) = n! // (k! * (n - k)!).

    NOTE(review): the scrambled original declared the signature as
    ``(snake_case__, snake_case__)`` — duplicate argument names are a
    SyntaxError — and passed the wrong argument to both factorials.

    :param n: total number of items (must satisfy n >= k).
    :param k: number of items chosen (must be non-negative).
    :raises ValueError: if k > n or k < 0, where a factorial of a negative
        number would be required.
    """
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
    # NOTE(review): the original called `combinations`, which is never defined
    # in this file — the function above is named `UpperCamelCase` (NameError).
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {UpperCamelCase(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {UpperCamelCase(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {UpperCamelCase(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
| 659 | 0 |
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
__UpperCamelCase = True
from torch.cuda.amp import autocast
__UpperCamelCase = logging.getLogger(__name__)
@dataclass
class _A:
    """Model/feature-extractor selection and gumbel-temperature hyperparameters.

    NOTE(review): in the scrambled source every field was named ``lowercase__``
    (so later fields silently replaced earlier ones) and several defaults were
    the undefined name ``__lowercase``. Field names are restored from their
    usages in ``main`` below (``model_args.cache_dir`` etc.); defaults for the
    unused fields are taken from the help text — confirm against the upstream
    wav2vec2 pretraining script.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."})
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."})
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."})
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."})
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."})


# Restore the public name referenced by `main` (HfArgumentParser((ModelArguments, ...))),
# which is otherwise undefined in this file.
ModelArguments = _A
def _a(model_args, training_args):
    """Configure stdout logging; level from --verbose_logging / main-process rank.

    NOTE(review): the scrambled original declared both parameters as
    ``_lowerCamelCase`` (duplicate argument name — a SyntaxError) and bound the
    chosen level to a throwaway name; names restored from the body's reads.

    :param model_args: parsed ModelArguments (reads ``verbose_logging``).
    :param training_args: parsed TrainingArguments (reads ``local_rank``).
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        # Only the main process logs at INFO; replicas stay at WARNING.
        logging_level = logging.INFO
    logger.setLevel(logging_level)


# Restore the public name `main` calls below; the scrambler renamed this def to `_a`.
configure_logger = _a
@dataclass
class _A:
    """Dataset selection and preprocessing arguments.

    NOTE(review): every field in the scrambled source was named ``lowercase__``;
    names restored from their usages in ``main`` below
    (``data_args.dataset_name``, ``data_args.speech_file_column``, ...).
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."})
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."})
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."})
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"})


# Restore the public name referenced by `main` below.
DataTrainingArguments = _A
@dataclass
class _A:
    """Pad raw speech features and sample the time-mask indices used by the
    wav2vec2 pretraining loss.

    NOTE(review): in the scrambled source every field was named ``lowercase__``
    and every intermediate was bound to a throwaway ``__snake_case`` name while
    later lines read ``mask_indices_seq_length``, ``batch_size`` and
    ``attention_mask`` (NameError); names restored from those reads. The mask
    indices are stored under ``batch["mask_time_indices"]`` because the trainer
    below reads ``inputs["mask_time_indices"]`` — confirm against the upstream
    script.
    """

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # Reformat list of dicts to a padded dict of tensors.
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch


# Restore the public name `main` instantiates below.
DataCollatorForWavaVecaPretraining = _A
class _A(Trainer):
    """Trainer subclass that decays the gumbel-softmax temperature after every
    update step.

    NOTE(review): the scrambled base class was the undefined ``__lowercase``
    (restored to ``Trainer`` — the body uses ``self._prepare_inputs``,
    ``self.compute_loss``, ``self.scaler``, ``self.deepspeed``); ``__init__``
    declared five parameters all named ``__magic_name__`` (SyntaxError) and
    never bound the ``self.*`` attributes read later; ``inputs``/``loss`` were
    bound to throwaway names in ``training_step``.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        """Forward everything to Trainer; keep the gumbel temperature schedule.

        :param max_gumbel_temp: starting (maximum) gumbel-softmax temperature.
        :param min_gumbel_temp: floor for the decayed temperature.
        :param gumbel_temp_decay: multiplicative decay applied per update step.
        """
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """One optimization step: forward, loss scaling/reduction, backward,
        then gumbel-temperature decay. Returns the detached loss."""
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            # Under DataParallel/deepspeed the config lives on model.module.
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        return loss.detach()


# Restore the public name `main` instantiates below.
WavaVecaPreTrainer = _A
def main():
    """Entry point: parse args, load/preprocess the speech dataset, build the
    model, collator and trainer, then train.

    NOTE(review): the scrambled original bound every intermediate to a throwaway
    ``__snake_case`` name while later lines read the meaningful names
    (``datasets``, ``feature_extractor``, ``vectorized_datasets``, ``config``),
    and the ``__main__`` guard called an undefined ``main`` — names restored
    from those usages.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True)

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names)

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'")
    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()


if __name__ == "__main__":
    main()
| 26 |
import argparse
import json
from tqdm import tqdm
def UpperCamelCase():
    """Parse raw DPR training data into an evaluation-set file (one question per
    line) and a gold-data file (tab-joined positive-context titles per line).

    NOTE(review): the ``__main__`` guard called an undefined ``main``; it now
    calls this function. The scrambled ``type=snake_case__`` arguments are
    restored to ``type=str`` and the unbound ``question``/titles locals to
    real names.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()
    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
            args.gold_data_path, "w") as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            titles = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(titles) + "\n")


if __name__ == "__main__":
    UpperCamelCase()
| 659 | 0 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase(Trainer):
    """Trainer with a post-processing hook between raw predictions and metric
    computation (question-answering style evaluation).

    NOTE(review): the scrambled base class was the undefined ``__snake_case``
    (restored to ``Trainer`` — the body uses ``self.get_eval_dataloader``,
    ``self.evaluation_loop``, ...); all three methods declared duplicate
    parameter names (SyntaxError) and the two public methods shared one
    scrambled name so the first was shadowed — restored to the standard
    ``evaluate``/``predict`` overrides; attribute/local names restored from
    their reads in the bodies.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        """Forward to Trainer; keep the raw eval examples and the function that
        turns raw predictions into metric inputs."""
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Run the evaluation loop, post-process predictions, compute and log metrics."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description='Evaluation',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            ))
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Run the prediction loop and return a PredictionOutput with
        post-processed predictions and prefixed metrics."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description='Prediction',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            ))
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict')
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 27 |
from collections.abc import Sequence
def UpperCamelCase(nums: Sequence[int] = None) -> int:
    """Return the maximum contiguous-subarray sum of `nums` (Kadane's algorithm).

    NOTE(review): the scrambled original computed ``max(nums, ans + num, nums)``,
    comparing the whole list against ints (TypeError); restored to
    ``max(ans, ans + num, num)``.

    :param nums: non-empty sequence of numbers.
    :raises ValueError: if `nums` is None or empty.
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Try on a sample input from the user
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
    # NOTE(review): the original called the undefined `max_subsequence_sum`;
    # the function above is named `UpperCamelCase`.
    print(UpperCamelCase(array))
| 659 | 0 |
'''simple docstring'''
# Public-API name list for this sub-package.
# NOTE(review): this list is never used under the name ``UpperCamelCase_`` —
# it is almost certainly meant to be bound to ``__all__``. Its entries
# ("Array2D" .. "Array5D") also do not match the names imported below; do not
# rename without checking the sibling modules.
UpperCamelCase_ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
# NOTE(review): four identical names ``ArrayaD`` are imported — each rebinds the
# previous one, so only a single class survives. These were presumably distinct
# Array2D/Array3D/Array4D/Array5D classes before scrambling; confirm against
# the ``.features`` module before fixing.
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 28 |
from typing import TYPE_CHECKING

from ....utils import _LazyModule

# Lazy-import structure: submodule name -> list of public symbols.
# NOTE(review): the original bound this dict to `_lowercase` but passed the
# undefined name `_import_structure` to `_LazyModule` below (NameError on import).
_import_structure = {'tokenization_tapex': ['TapexTokenizer']}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first
    # attribute access (the sole purpose of the `import sys` above).
    _lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure)
    sys.modules[__name__] = _lowercase
| 659 | 0 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
A_ = pytest.mark.integration
@require_faiss
class __lowerCamelCase(TestCase):
    """Integration tests: faiss / elasticsearch indexes attached to a Dataset.

    NOTE(review): base class restored to ``TestCase`` (scrambled to the
    undefined ``lowerCAmelCase``); duplicate lambda parameter names
    (SyntaxError), the undefined ``np.floataa`` dtype, and result tuples that
    were unpacked into throwaway names while ``examples`` was read afterwards
    are all restored. Method names restored to ``test_*`` so unittest actually
    collects them. Mock return values are assumed to belong to
    ``mocked_index_create``/``mocked_search`` — confirm against the upstream
    datasets test-suite.
    """

    def _create_dummy_dataset(self):
        # 30 rows: "my_name-train_0" ... "my_name-train_29".
        dset = Dataset.from_dict(
            {'filename': ['my_name-train' + '_' + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True)
        dset = dset.add_faiss_index('vecs', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')
        dset.drop_index('vecs')

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name='vecs',
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name='vecs',
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index('vecs', tmp_file.name)
            dset.load_faiss_index('vecs2', tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples('vecs2', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name='vecs')
        dset.drop_index('vecs')
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, 'vecs2', np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
                'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch(
                'elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename', es_client=es_client)
            scores, examples = dset.get_nearest_examples('filename', 'my_name-train_29')
            self.assertEqual(examples['filename'][0], 'my_name-train_29')
@require_faiss
class FaissIndexTest(TestCase):
    """Unit tests for the standalone FaissIndex wrapper.

    NOTE(review): the scrambled class reused the name of the class above
    (shadowing it) and unpacked every search result into throwaway names while
    reading ``scores``/``indices`` afterwards; ``query[1] = 1`` and the
    ``ValueError`` expectations are restored from the assertions that follow —
    confirm against the upstream datasets test-suite.
    """

    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)
        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory='Flat')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory='LSH')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        # string_factory and custom_index are mutually exclusive.
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory='Flat', custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def lowercase(mockfs):
    """FaissIndex round-trip through an fsspec filesystem (``mockfs`` pytest fixture).

    NOTE(review): the parameter was scrambled to ``lowerCAmelCase__`` while the
    body read ``mockfs`` (NameError); for pytest fixture injection the
    parameter must be named ``mockfs`` anyway.
    """
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = 'index.faiss'
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    """Unit tests for the ElasticSearchIndex wrapper (fully mocked client).

    NOTE(review): the scrambled class reused the same name as the faiss test
    classes above (shadowing) and bound every mock return value and search
    result to throwaway names while later lines read ``scores``/``indices``;
    the ``mocked_index_create``/``mocked_search`` assignments are restored from
    the standard mock pattern — confirm against the upstream test-suite.
    """

    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
                'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch(
                'elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(['foo', 'bar', 'foobar'])

            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 29 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowercase = '''src/diffusers'''
_lowercase = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
_lowercase = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowercase = spec.loader.load_module()
def UpperCamelCase ( snake_case__ , snake_case__):
return line.startswith(snake_case__) or len(snake_case__) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , snake_case__) is not None
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Tuple = object_name.split(".")
lowerCAmelCase_ : Union[str, Any] = 0
# First let's find the module where our object lives.
lowerCAmelCase_ : Union[str, Any] = parts[i]
while i < len(snake_case__) and not os.path.isfile(os.path.join(snake_case__ , F'''{module}.py''')):
i += 1
if i < len(snake_case__):
lowerCAmelCase_ : Dict = os.path.join(snake_case__ , parts[i])
if i >= len(snake_case__):
raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''')
with open(os.path.join(snake_case__ , F'''{module}.py''') , "r" , encoding="utf-8" , newline="\n") as f:
lowerCAmelCase_ : Optional[Any] = f.readlines()
# Now let's find the class / func in the code!
lowerCAmelCase_ : Union[str, Any] = ""
lowerCAmelCase_ : int = 0
for name in parts[i + 1 :]:
while (
line_index < len(snake_case__) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index]) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(snake_case__):
raise ValueError(F''' {object_name} does not match any function or class in {module}.''')
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
lowerCAmelCase_ : Union[str, Any] = line_index
while line_index < len(snake_case__) and _should_continue(lines[line_index] , snake_case__):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
lowerCAmelCase_ : List[str] = lines[start_index:line_index]
return "".join(snake_case__)
_lowercase = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_lowercase = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_lowercase = re.compile(r'''<FILL\s+[^>]*>''')
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Any = code.split("\n")
lowerCAmelCase_ : Any = 0
while idx < len(snake_case__) and len(lines[idx]) == 0:
idx += 1
if idx < len(snake_case__):
return re.search(R"^(\s*)\S" , lines[idx]).groups()[0]
return ""
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Dict = len(get_indent(snake_case__)) > 0
if has_indent:
lowerCAmelCase_ : Dict = F'''class Bla:\n{code}'''
lowerCAmelCase_ : Optional[int] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=snake_case__)
lowerCAmelCase_ : Optional[Any] = black.format_str(snake_case__ , mode=snake_case__)
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = style_docstrings_in_code(snake_case__)
return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check that all code commented `# Copied from diffusers.*` in `filename` matches the original.

    Args:
        filename: path of the file to check.
        overwrite: when True, fix out-of-sync copies in place instead of only reporting them.

    Returns:
        A list of `[object_name, start_index]` entries, one per inconsistent copy found.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        # The copy starts on the next line, or one further when the comment sits above the definition.
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(F'''^{indent}# End copy''', line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code_lines = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code_lines)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    # Apply the replacement to the lower- and upper-cased variants too.
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(F'''Detected changes, rewriting {filename}.''')
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite=False):
    """
    Check every Python file in the diffusers source tree for `# Copied from` consistency.

    Args:
        overwrite: when True, fix inconsistent copies in place instead of raising.

    Raises:
        Exception: when inconsistencies are found and `overwrite` is False.
    """
    # NOTE(review): assumes the module-level DIFFUSERS_PATH constant defined at the top of
    # this script (as in upstream utils/check_copies.py) — confirm.
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
    # Command-line entry point: `--fix_and_overwrite` rewrites inconsistent copies in place.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 659 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
# Module-level logger (the second `__a` assignment previously shadowed the first).
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'deepmind/language-perceiver': 'https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json',
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __a( PretrainedConfig ):
    """
    Configuration class for a Perceiver model. Stores the architecture hyper-parameters
    used to instantiate the model; all arguments are persisted as attributes.
    """

    # Required by the PretrainedConfig serialization contract.
    model_type = '''perceiver'''

    def __init__(
        self,
        num_latents=256,
        d_latents=1_280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2_048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1_920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ) -> int:
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class __a( OnnxConfig ):
    """
    ONNX export configuration for Perceiver models.

    NOTE(review): this file defines two classes named `__a`; the second shadows the
    first at module level — consider giving them distinct names.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes of the exported graph, keyed by input name.
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''inputs''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the ONNX export against the original model.
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build dummy model inputs from a tokenizer or a feature extractor."""
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [''' '''.join(['''a'''] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework ) )
            inputs['''inputs'''] = inputs.pop('''input_ids''' )
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width )
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework ) )
            inputs['''inputs'''] = inputs.pop('''pixel_values''' )
            return inputs
        else:
            raise ValueError(
                '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (the map assignment previously shadowed it — both were `_lowercase`).
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/swinv2-tiny-patch4-window8-256''': (
        '''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
    ),
}
class __snake_case ( PretrainedConfig ):
    """
    Configuration class for a Swin Transformer V2 model. Stores the architecture
    hyper-parameters used to instantiate the model.
    """

    # Required by the PretrainedConfig serialization contract.
    model_type = 'swinv2'
    # Maps standard config attribute names to the Swinv2-specific ones.
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=2_24,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 659 | 0 |
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> "qiskit.result.counts.Counts":
    """
    Simulate a quantum full adder on `input_1`, `input_2` and `carry_in`.

    Each input may be 0, 1 or 2; a value of 2 puts the corresponding qubit into
    superposition via a Hadamard gate. Returns the measurement counts of the
    (sum, carry-out) qubits over 1000 shots.

    Raises:
        TypeError: if an input is a string.
        ValueError: if an input is negative, non-integral, or greater than 2.
    """
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('inputs must be integers.')

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')

    # build registers
    qr = qiskit.QuantumRegister(4, 'qr')
    cr = qiskit.ClassicalRegister(2, 'cr')
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=10_00)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
    # Demo: add 1 + 1 with carry-in 1 on the simulated circuit.
    print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
# Module-level logger; the class below calls `logger.warning`.
logger = logging.get_logger(__name__)
class __snake_case ( SequenceFeatureExtractor ):
    """
    Feature extractor that turns raw speech into Mel filterbank (MFSC) features,
    with optional per-feature mean/variance normalization and padding support.
    """

    model_input_names = ['input_features', 'attention_mask']

    def __init__(
        self,
        feature_size=80,
        sampling_rate=1_60_00,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32_768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ) -> None:
        """
        Args:
            feature_size: number of Mel filters (dimension of the extracted features).
            sampling_rate: expected sampling rate of the input audio, in Hz.
            padding_value: value used to pad shorter sequences.
            hop_length / win_length: STFT stride and window length, in milliseconds.
            win_function: name of the window function.
            frame_signal_scale: multiplicative scale applied to the waveform before the STFT.
            preemphasis_coeff: pre-emphasis coefficient applied before the DFT.
            mel_floor: minimum value of the Mel spectrogram.
            normalize_means / normalize_vars: whether to mean/variance-normalize the features.
            return_attention_mask: whether `__call__` returns an attention mask by default.
        """
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)

        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        # Convert the millisecond window/stride settings into sample counts.
        self.sample_size = win_length * sampling_rate // 10_00
        self.sample_stride = hop_length * sampling_rate // 10_00
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extract MFSC features for a single (unbatched) waveform."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        # Transpose so the time axis comes first.
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        """Mean/variance-normalize one feature array over its first `input_length` frames."""
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            # Re-apply the padding value to padded frames altered by the normalization.
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.floataa) if hasattr(np, "floataa") else x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        """Normalize a batch of feature arrays, taking valid lengths from `attention_mask` when given."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        """
        Featurize one or several sequence(s) of raw speech into padded MFSC features.

        Raises:
            ValueError: if `sampling_rate` is given and does not match the extractor's rate,
                or if a multi-channel numpy input is provided.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features} )

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features" )
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            # Only hand the attention mask to `normalize` when real padding was applied.
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )

        return padded_inputs
| 659 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger (previously shadowed by the map — both were `UpperCAmelCase_`).
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class __UpperCamelCase ( BackboneConfigMixin , PretrainedConfig ):
    """
    Configuration class for a FocalNet model. Stores the architecture hyper-parameters
    and the backbone output-feature selection.
    """

    # Required by the PretrainedConfig serialization contract.
    model_type = """focalnet"""

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs )

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        # Stage names used by the backbone API ("stem" plus one entry per depth).
        self.stage_names = ['''stem'''] + [f'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10  # referenced by the ternary-search functions below
def lin_search(left, right, array, target):
    """Linear search for `target` in `array[left:right]`; returns its index or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array, target):
    """
    Iterative ternary search for `target` in sorted `array`; returns an index or -1.
    Falls back to `lin_search` once the window is smaller than `precision`.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            # Target lies strictly between the two probe points.
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left, right, array, target):
    """
    Recursive ternary search for `target` in sorted `array[left:right]`; returns an index or -1.
    Falls back to `lin_search` once the window is smaller than `precision`.
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Read a sorted, comma-separated list and the target value from stdin.
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print('''Not found''')
| 659 | 0 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial-division primality test: True when no divisor in [2, isqrt(number)] divides `number`."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))
def solution(max_prime: int = 10**6) -> int:
    """
    Count primes of the form n^3 + n^2... generated as 7, 7+12, 7+12+18, ... (candidate
    grows by 6 * cube_index each step) below `max_prime`.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        # Test the current candidate (was mistakenly testing the limit itself).
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
if __name__ == "__main__":
    # Print the result for the default limit when executed as a script.
    print(F"""{solution() = }""")
| 33 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

# Restored constant names: the class below references VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
    },
    '''tokenizer_config_file''': {
        '''facebook/blenderbot_small-90M''': (
            '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/blenderbot_small-90M''': 512,
}
class __snake_case ( PreTrainedTokenizerFast ):
    """
    "Fast" BlenderbotSmall tokenizer backed by a byte-level BPE tokenizer.
    """

    # Class attributes restored to the names the PreTrainedTokenizerFast contract expects.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ) -> None:
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one or two sequences with BOS/EOS special tokens."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return all-zero token type ids (BlenderbotSmall does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
| 659 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ['''pixel_values''']
def __init__( self , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = PIL.Image.BICUBIC , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = 1 / 2_5_5 , lowerCamelCase_ = True , lowerCamelCase_ = True , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> None:
super().__init__(**lowerCamelCase_)
UpperCamelCase = size if size is not None else {'''height''': 2_5_6, '''width''': 2_5_6}
UpperCamelCase = get_size_dict(lowerCamelCase_)
UpperCamelCase = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
UpperCamelCase = get_size_dict(lowerCamelCase_ , param_name='''crop_size''')
UpperCamelCase = do_resize
UpperCamelCase = size
UpperCamelCase = resample
UpperCamelCase = do_center_crop
UpperCamelCase = crop_size
UpperCamelCase = do_rescale
UpperCamelCase = rescale_factor
UpperCamelCase = do_normalize
UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = PIL.Image.BICUBIC , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
UpperCamelCase = get_size_dict(lowerCamelCase_)
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}')
return resize(
lowerCamelCase_ , size=(size['''height'''], size['''width''']) , resample=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
UpperCamelCase = get_size_dict(lowerCamelCase_)
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}')
return center_crop(lowerCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> Tuple:
return rescale(lowerCamelCase_ , scale=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , **lowerCamelCase_ , ) -> np.ndarray:
return normalize(lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_ , data_format=lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_=None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = ChannelDimension.FIRST , **lowerCamelCase_ , ) -> PIL.Image.Image:
UpperCamelCase = do_resize if do_resize is not None else self.do_resize
UpperCamelCase = resample if resample is not None else self.resample
UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase = image_mean if image_mean is not None else self.image_mean
UpperCamelCase = image_std if image_std is not None else self.image_std
UpperCamelCase = size if size is not None else self.size
UpperCamelCase = get_size_dict(lowerCamelCase_)
UpperCamelCase = crop_size if crop_size is not None else self.crop_size
UpperCamelCase = get_size_dict(lowerCamelCase_ , param_name='''crop_size''')
UpperCamelCase = make_list_of_images(lowerCamelCase_)
if not valid_images(lowerCamelCase_):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
# All transformations expect numpy arrays.
UpperCamelCase = [to_numpy_array(lowerCamelCase_) for image in images]
if do_resize:
UpperCamelCase = [self.resize(image=lowerCamelCase_ , size=lowerCamelCase_ , resample=lowerCamelCase_) for image in images]
if do_center_crop:
UpperCamelCase = [self.center_crop(image=lowerCamelCase_ , size=lowerCamelCase_) for image in images]
if do_rescale:
UpperCamelCase = [self.rescale(image=lowerCamelCase_ , scale=lowerCamelCase_) for image in images]
if do_normalize:
UpperCamelCase = [self.normalize(image=lowerCamelCase_ , mean=lowerCamelCase_ , std=lowerCamelCase_) for image in images]
UpperCamelCase = [to_channel_dimension_format(lowerCamelCase_ , lowerCamelCase_) for image in images]
UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase_ , tensor_type=lowerCamelCase_) | 34 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_32):
    """
    Convert a 32-character bit string (bytes) to little-endian word order.

    The four 8-character groups are emitted in reverse order.

    Raises:
        ValueError: If the input is not exactly 32 characters long.
    """
    # Name restored from the call sites in `preprocess`/`get_block_words`;
    # the mangled original referenced an undefined `string_aa`.
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i):
    """
    Format the low 32 bits of non-negative int `i` as little-endian hex bytes.

    Raises:
        ValueError: If `i` is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    # Keep only the lowest 8 hex digits (32 bits).
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    # Emit the four hex byte-pairs in reverse (little-endian) order.
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message):
    """
    Convert `message` (bytes) into an MD5-padded bit string.

    Each byte becomes 8 ASCII '0'/'1' characters; the string is then padded
    with a '1' bit and '0' bits to 448 mod 512, and the 64-bit original
    length is appended in the little-endian layout MD5 requires.
    """
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    # Original length in bits, as a 64-character bit string.
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string):
    """
    Yield each 512-character block of `bit_string` as 16 little-endian ints.

    Raises:
        ValueError: If the input length is not a multiple of 512.
    """
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            # Each 32-char slice is one word, stored little-endian.
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i):
    """
    Return the bitwise NOT of `i` interpreted as an unsigned 32-bit integer.

    Raises:
        ValueError: If `i` is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    # Flip every bit of the 32-bit representation.
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a, b):
    """Add two integers modulo 2**32 (unsigned 32-bit wrap-around)."""
    # The mangled original declared both parameters with the same name,
    # which is a SyntaxError; names restored.
    return (a + b) % 2**32
def left_rotate_aa(i, shift):
    """
    Rotate the 32-bit value `i` left by `shift` bits.

    Raises:
        ValueError: If `i` or `shift` is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    # High bits shifted out on the left re-enter on the right.
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message):
    """
    Return the 32-character hexadecimal MD5 digest of `message` (bytes).

    Implements RFC 1321: the padded bit string is processed in 512-bit
    blocks, each mixed into four 32-bit running states through 64 rounds,
    and the final states are emitted little-endian as hex.
    """
    bit_string = preprocess(message)

    # Per-round additive constants: floor(2**32 * |sin(i + 1)|) (RFC 1321 T table).
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    # Left-rotate amounts for each of the 64 round operations.
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            # Rotate the four states; `b` is updated last so its old value
            # feeds both `c` and the new `b`.
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_aa(a0, a)
        b0 = sum_aa(b0, b)
        c0 = sum_aa(c0, c)
        d0 = sum_aa(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 659 | 0 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> Dict:
    """
    Build the full kwargs dict for a MaMaaa (M2M100) forward pass.

    Missing masks are derived from the token ids (non-pad positions attended)
    and head masks default to all-ones on the test device.
    """
    # Name and parameter names restored: the mangled version declared eight
    # parameters all called `A__` (a SyntaxError) and this helper is called
    # as `prepare_mam_aaa_inputs_dict` throughout the file.
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # BUGFIX: the computed decoder mask was silently discarded and the
        # encoder mask returned in its place.
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class MaMaaaModelTester:
    """Builds tiny configs/inputs and reusable checks for MaMaaa (M2M100) model tests."""

    # Class name restored: `setUp` below instantiates `MaMaaaModelTester(self)`,
    # but the mangled class was named `lowercase`.
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Create random encoder/decoder token ids and a matching tiny config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        """Return a MaMaaaConfig matching the tester's tiny hyperparameters."""
        return MaMaaaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        """Alias used by the common ModelTesterMixin machinery."""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Check that cached (past_key_values) decoding matches full re-decoding."""
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        """Check that encoder/decoder saved and reloaded standalone reproduce the joint model."""
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_a = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_a = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + generation + pipeline test suite for MaMaaa (M2M100) models."""

    # Mixin bases restored from the imports at the top of the file; the mangled
    # version inherited from undefined `_UpperCAmelCase` names.
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False

    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model_a, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        # Models accept `inputs_embeds` / `decoder_inputs_embeds` instead of token ids.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    """Build a LongTensor from `tok_lst` on the globally-selected test device."""
    # Name restored from call sites below; the mangled version also passed the
    # token list itself as `device=`, which cannot work — use `torch_device`.
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
# Numeric tolerance used by the integration tests below when comparing model
# outputs with `torch.allclose`. NOTE(review): name-mangled (upstream calls it
# TOLERANCE, and the class below references a different mangled name) — confirm.
a_ : float = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    """Slow integration tests against the public facebook/m2m100_418M checkpoint."""

    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        # `a_` is the module-level numeric tolerance constant (1e-4).
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=a_))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=a_))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 35 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
_lowercase = True
from torch.cuda.amp import autocast
_lowercase = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to pre-train.
    """

    # Field names restored from their uses later in the file
    # (model_args.model_name_or_path, .cache_dir, .verbose_logging, etc.);
    # the mangled version gave every field the same name.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    # NOTE(review): the True/False defaults below were destroyed by the mangling;
    # restored per the upstream wav2vec2 pretraining script — confirm.
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args, training_args):
    """
    Configure root logging and set this module's logger verbosity.

    DEBUG when verbose logging is requested, INFO on the main process,
    WARNING otherwise.
    """
    # Name and parameters restored: the mangled version declared two parameters
    # with the same name (a SyntaxError) and is called as `configure_logger(...)`.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    # The module-level logger variable was mangled to `_lowercase`; fetch the
    # same logger object directly instead of relying on the broken name.
    logging.getLogger(__name__).setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    # Field names restored from their uses in `main()`
    # (data_args.dataset_name, .train_split_name, .speech_file_column, etc.).
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    # NOTE(review): mangled default restored as False per the upstream script — confirm.
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    """
    Data collator that dynamically pads received inputs and samples the masked
    time indices needed for self-supervised wav2vec2 pretraining.
    """

    # Class and field names restored from the instantiation in `main()`
    # (DataCollatorForWavaVecaPretraining(model=..., feature_extractor=...)).
    # Annotations are strings so the module imports even if the model classes
    # are unavailable at definition time.
    model: "WavaVecaForPreTraining"
    feature_extractor: "WavaVecaFeatureExtractor"
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # Reformat list of dicts into a padded batch of PyTorch tensors.
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            # NOTE(review): the scatter line was destroyed by the mangling and is
            # restored from the upstream wav2vec2 pretraining script — confirm.
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        # NOTE(review): if batch["attention_mask"] is None, `attention_mask` is
        # unbound here — same behavior as the upstream script.
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class WavaVecaPreTrainer(Trainer):
    """
    Trainer subclass for wav2vec2 pretraining that decays the model's gumbel
    softmax temperature once per training step.
    """

    # Class name restored from the instantiation in `main()`; the keyword
    # parameter names are grounded by the call site
    # (max_gumbel_temp=..., min_gumbel_temp=..., gumbel_temp_decay=...).
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform one training step (forward, loss scaling, backward) and decay
        the gumbel temperature. Returns the detached loss.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def UpperCamelCase ( ):
    """Entry point for Wav2Vec2 pretraining.

    Parses model/data/training arguments, loads and splits the audio dataset,
    builds the feature extractor, config, model, data collator and trainer,
    then runs pretraining.

    NOTE(review): local names look machine-mangled — later references
    (``parser``, ``datasets``, ``feature_extractor``, ``vectorized_datasets``,
    ``config``, ``trainer``) do not match the ``lowerCAmelCase_`` assignment
    targets, and several arguments are passed as the undefined ``snake_case__``.
    Verify against the upstream script before executing.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    lowerCAmelCase_ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Dict = parser.parse_args_into_dataclasses()
    configure_logger(snake_case__ , snake_case__)
    # Downloading and loading a dataset from the hub.
    lowerCAmelCase_ : List[str] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir)
    if "validation" not in datasets.keys():
        # No ready-made validation split: carve one out of the train split by
        # percentage so that only "validation" and "train" keys remain.
        lowerCAmelCase_ : Any = DatasetDict()
        lowerCAmelCase_ : Union[str, Any] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
        lowerCAmelCase_ : List[str] = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
    else:
        # make sure only "validation" and "train" keys remain
        lowerCAmelCase_ : Union[str, Any] = DatasetDict()
        lowerCAmelCase_ : int = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
        lowerCAmelCase_ : Any = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
    # only normalized-inputs-training is supported
    lowerCAmelCase_ : Dict = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=snake_case__)

    def prepare_dataset(snake_case__):
        # Decode the audio file and check all files have the correct sampling rate.
        lowerCAmelCase_ , lowerCAmelCase_ : str = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    lowerCAmelCase_ : int = datasets.map(
        snake_case__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names)
    # filter audio files that are too long
    lowerCAmelCase_ : int = vectorized_datasets.filter(
        lambda snake_case__: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))

    def normalize(snake_case__):
        # Run the feature extractor to normalize the raw speech arrays.
        return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    lowerCAmelCase_ : str = vectorized_datasets.map(
        snake_case__ , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    lowerCAmelCase_ : Optional[Any] = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'")
    lowerCAmelCase_ : Dict = WavaVecaForPreTraining(snake_case__)
    lowerCAmelCase_ : int = DataCollatorForWavaVecaPretraining(model=snake_case__ , feature_extractor=snake_case__)
    lowerCAmelCase_ : List[Any] = WavaVecaPreTrainer(
        model=snake_case__ , data_collator=snake_case__ , args=snake_case__ , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=snake_case__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
    trainer.train()


if __name__ == "__main__":
    # NOTE(review): no `main` is defined under that name in this file
    # (presumably the mangled entry point above) — verify.
    main()
| 659 | 0 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # CLI for converting an original ControlNet checkpoint into the diffusers
    # format.
    #
    # Fix: the original bound every value to machine-mangled names
    # (`__lowercase`, `def lowercase`) while all later references used the real
    # names (`parser`, `parse_bool`, `args`, `controlnet`), so the script
    # raised NameError on the first `parser.add_argument` call.  Definitions
    # and uses are made consistent; all CLI strings are unchanged.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
    )
    parser.add_argument(
        '''--original_config_file''',
        type=str,
        required=True,
        help='''The YAML config file corresponding to the original architecture.''',
    )
    parser.add_argument(
        '''--num_in_channels''',
        default=None,
        type=int,
        help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
    )
    parser.add_argument(
        '''--image_size''',
        default=512,
        type=int,
        help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
            ''' Base. Use 768 for Stable Diffusion v2.'''
        ),
    )
    parser.add_argument(
        '''--extract_ema''',
        action='''store_true''',
        help=(
            '''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
            ''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
            ''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
        ),
    )
    parser.add_argument(
        '''--upcast_attention''',
        action='''store_true''',
        help=(
            '''Whether the attention computation should always be upcasted. This is necessary when running stable'''
            ''' diffusion 2.1.'''
        ),
    )
    parser.add_argument(
        '''--from_safetensors''',
        action='''store_true''',
        help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
    )
    parser.add_argument(
        '''--to_safetensors''',
        action='''store_true''',
        help='''Whether to store pipeline in safetensors format or not.''',
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')

    def parse_bool(string):
        """Strictly parse the literal strings 'True'/'False' into a bool.

        Raises:
            ValueError: for any other input, so argparse reports a clean
                usage error instead of silently truthy-casting.
        """
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"""could not parse string as bool {string}""" )

    parser.add_argument(
        '''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
    )
    parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )
    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 36 |
from __future__ import annotations
from collections.abc import Callable
def UpperCamelCase ( fnc , x_start , x_end , steps = 1_00 , ):
    """Approximate the area between ``fnc`` and the x-axis on [x_start, x_end]
    using the composite trapezoidal rule.

    Fix: the original signature repeated the mangled parameter name
    ``snake_case__`` four times (a SyntaxError) and the body reused one mangled
    local for both trapezoid edges.  Parameter names are restored from the
    names the body actually reads (``fnc``/``x_start``/``x_end``/``steps``),
    and the left/right edge state is tracked with distinct variables.

    Args:
        fnc: Integrand; called with a number, returns a number.
        x_start: Lower limit of integration.
        x_end: Upper limit of integration.
        steps: Number of equal-width trapezoids; more steps -> better accuracy.

    Returns:
        float: the trapezoidal estimate.  ``abs`` on the summed ordinates keeps
        each segment's contribution non-negative ("area between curve and
        axis"), not the signed integral.
    """
    x_left = x_start            # left edge of the current trapezoid
    f_left = fnc(x_start)       # integrand value at the left edge
    area = 0.0
    for _ in range(steps):
        # Approximate this small segment of the curve as linear and solve
        # for the trapezoidal area.
        x_right = (x_end - x_start) / steps + x_left
        f_right = fnc(x_right)
        area += abs(f_right + f_left) * (x_right - x_left) / 2
        # Advance to the next segment.
        x_left = x_right
        f_left = f_right
    return area
if __name__ == "__main__":
    # Demo: integrate f(x) = x^3 + x^2 over [-5, 5] with increasing step counts.
    #
    # Fix: the original nested function read an undefined `x` (its parameter
    # was mangled to `snake_case__`), bound the loop counter to `_lowercase`
    # while reading `i`, and called the undefined `trapezoidal_area`.  Names
    # are made consistent, calling the trapezoidal integrator actually defined
    # above (`UpperCamelCase`).

    def f(x):
        """Demo integrand: x^3 + x^2."""
        return x**3 + x**2

    print('''f(x) = x^3 + x^2''')
    print('''The area between the curve, x = -5, x = 5 and the x axis is:''')
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {UpperCamelCase(f, -5, 5, i)}")
        i *= 10
| 659 | 0 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase : Tuple = """true"""
def UpperCamelCase_ ( __a , __a=82 , __a=16 ) -> List[Any]:
    # Build a tiny seeded regression model + dataloader and prepare them with
    # an Accelerator; returns (model, ddp_model, dataloader).
    #
    # NOTE(review): the parameter list is machine-mangled — `__a` is repeated
    # (a SyntaxError as written) and the body reads `accelerator`, `model`,
    # `ddp_model`, `dataloader`, which no longer match any binding.  Intent
    # appears to be (accelerator, num_samples=82, batch_size=16) — confirm
    # against the upstream accelerate test before use.
    set_seed(42 )
    a__ : Any = RegressionModel()
    a__ : Optional[Any] = deepcopy(__a )
    a__ : Dict = RegressionDataset(length=__a )
    a__ : List[str] = DataLoader(__a , batch_size=__a )
    model.to(accelerator.device )
    a__, a__ : int = accelerator.prepare(__a , __a )
    return model, ddp_model, dataloader
def UpperCamelCase_ ( __a , __a=False ) -> str:
    # Load the GLUE/MRPC validation split, tokenize sentence pairs, and wrap
    # them in a DataLoader whose padding strategy depends on the second flag
    # (pad-to-longest vs. fixed max_length=128).
    #
    # NOTE(review): names are machine-mangled — `tokenizer`, `dataset`,
    # `use_longest`, `tokenized_datasets`, `accelerator` are read but never
    # bound under those names here; verify against the upstream accelerate
    # test before use.
    a__ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
    a__ : str = load_dataset("glue" , "mrpc" , split="validation" )

    def tokenize_function(__a ):
        # Tokenize a batch of sentence pairs (truncated).
        a__ : List[str] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__a , max_length=__a )
        return outputs

    with accelerator.main_process_first():
        a__ : int = dataset.map(
            __a , batched=__a , remove_columns=["idx", "sentence1", "sentence2"] , )
    a__ : Optional[Any] = tokenized_datasets.rename_column("label" , "labels" )

    def collate_fn(__a ):
        # Pad to the longest sequence in the batch, or to a fixed length of 128.
        if use_longest:
            return tokenizer.pad(__a , padding="longest" , return_tensors="pt" )
        return tokenizer.pad(__a , padding="max_length" , max_length=128 , return_tensors="pt" )

    return DataLoader(__a , shuffle=__a , collate_fn=__a , batch_size=16 )
def UpperCamelCase_ ( __a , __a ) -> List[Any]:
    # Build both a distributed ("ddp") and a single-process ("no") MRPC
    # evaluation setup for comparing metric gathering, plus the Accelerator.
    #
    # NOTE(review): mangled names — `dispatch_batches`, `ddp_model`,
    # `ddp_dataloader`, `model`, `dataloader`, `accelerator` are read without
    # matching bindings, and the parameters are both `__a`; confirm upstream.
    a__ : List[Any] = Accelerator(dispatch_batches=__a , split_batches=__a )
    a__ : Any = get_dataloader(__a , not dispatch_batches )
    a__ : Any = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased" , return_dict=__a )
    a__, a__ : Tuple = accelerator.prepare(__a , __a )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def UpperCamelCase_ ( __a , __a , __a ) -> str:
    # Run a model over a dataloader without gradients, gather (logit, target)
    # pairs across processes via `accelerator.gather_for_metrics`, and return
    # the concatenated logits and targets tensors.
    #
    # NOTE(review): parameters are all the mangled `__a` (SyntaxError as
    # written); the body reads `model`, `dataloader`, `accelerator`, `logit`,
    # `target`, `logits_and_targets` — verify against upstream.
    a__ : List[str] = []
    for batch in dataloader:
        a__, a__ : Tuple = batch.values()
        with torch.no_grad():
            a__ : Any = model(__a )
        a__, a__ : Union[str, Any] = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    a__, a__ : int = [], []
    for logit, targ in logits_and_targets:
        logits.append(__a )
        targs.append(__a )
    a__, a__ : List[str] = torch.cat(__a ), torch.cat(__a )
    return logits, targs
def UpperCamelCase_ ( __a , __a=82 , __a=False , __a=False , __a=16 ) -> Optional[int]:
    # End-to-end length check: the number of gathered predictions must equal
    # the requested number of samples (i.e. gather_for_metrics drops the
    # duplicated samples added for even sharding).
    #
    # NOTE(review): parameters are the mangled, repeated `__a`; `num_samples`
    # is read without a binding — verify upstream.
    a__, a__, a__ : Optional[Any] = get_basic_setup(__a , __a , __a )
    a__, a__ : int = generate_predictions(__a , __a , __a )
    assert (
        len(__a ) == num_samples
    ), f'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(__a )}'''
def UpperCamelCase_ ( __a = False , __a = False ) -> Optional[Any]:
    # Compare GLUE/MRPC metrics computed on a single process ("no") against
    # the distributed setup ("ddp"); accuracy and F1 must agree to float
    # tolerance (math.isclose).
    #
    # NOTE(review): names are machine-mangled throughout — `metric`, `setup`,
    # `accelerator`, `model`, `dataloader`, `device`, `outputs`, `preds`,
    # `references`, `baseline`, `distributed` are read without matching
    # bindings; verify against the upstream accelerate test.
    a__ : Any = evaluate.load("glue" , "mrpc" )
    a__, a__ : List[Any] = get_mrpc_setup(__a , __a )
    # First do baseline
    a__, a__, a__ : Any = setup["no"]
    model.to(__a )
    model.eval()
    for batch in dataloader:
        batch.to(__a )
        with torch.inference_mode():
            a__ : str = model(**__a )
        a__ : List[str] = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=__a , references=batch["labels"] )
    a__ : Optional[Any] = metric.compute()
    # Then do distributed
    a__, a__, a__ : Any = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            a__ : Optional[Any] = model(**__a )
        a__ : Any = outputs.logits.argmax(dim=-1 )
        a__ : str = batch["labels"]
        a__, a__ : Optional[int] = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=__a , references=__a )
    a__ : int = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def UpperCamelCase_ ( ) -> Dict:
    # Driver: runs gather_for_metrics MRPC comparisons (GPU/TPU only), the
    # torch-metric length checks for every split/dispatch combination, and a
    # final "last batch not dropped when perfectly divisible" check.
    #
    # NOTE(review): `__a` below is undefined at this scope (mangled) — upstream
    # passes the loop's split/dispatch booleans here; verify before running.
    a__ : int = Accelerator(split_batches=__a , dispatch_batches=__a )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**" )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
                test_mrpc(__a , __a )
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**" )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            a__ : Dict = Accelerator(split_batches=__a , dispatch_batches=__a )
            if accelerator.is_local_main_process:
                print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
            test_torch_metrics(__a , 99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**" )
    a__ : Optional[Any] = Accelerator()
    test_torch_metrics(__a , 512 )
    accelerator.state._reset_state()
def UpperCamelCase_ ( __a ) -> Dict:
    # For xla_spawn (TPUs): per-process entry point that just delegates.
    # NOTE(review): `main` is not defined under that name in this file — the
    # driver above was presumably `main` before name mangling; verify.
    main()


if __name__ == "__main__":
    main()
| 37 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
    """Fast CPU tests for ``StableDiffusionLDMaDPipeline`` with tiny dummy components.

    NOTE(review): identifiers are machine-mangled — all four class attributes
    are bound to the same name ``UpperCamelCase_`` (later assignments overwrite
    earlier ones), all test methods share the name ``UpperCAmelCase_`` (only
    the last survives), and method bodies read names (``ldmad_pipe``,
    ``inputs``, ``output`` ...) that the mangled assignments no longer bind.
    Verify against the upstream diffusers test file before relying on this.
    """

    UpperCamelCase_ = StableDiffusionLDMaDPipeline
    UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS
    UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
    UpperCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS

    def UpperCAmelCase_ ( self : Tuple ) -> str:
        '''Build a dict of tiny, seeded pipeline components (unet, scheduler, vae, text encoder, tokenizer).'''
        torch.manual_seed(0 )
        lowerCAmelCase_ : Optional[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,)
        lowerCAmelCase_ : Any = DDIMScheduler(
            beta_start=0.00_085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=lowerCAmelCase__ ,set_alpha_to_one=lowerCAmelCase__ ,)
        torch.manual_seed(0 )
        lowerCAmelCase_ : str = AutoencoderKL(
            block_out_channels=[32, 64] ,in_channels=6 ,out_channels=6 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
        torch.manual_seed(0 )
        lowerCAmelCase_ : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,)
        lowerCAmelCase_ : Optional[int] = CLIPTextModel(lowerCAmelCase__ )
        lowerCAmelCase_ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        lowerCAmelCase_ : List[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : List[str]=0 ) -> Dict:
        '''Standard text-to-image call kwargs with a device-appropriate seeded generator.'''
        if str(lowerCAmelCase__ ).startswith("mps" ):
            # mps does not support device-bound generators
            lowerCAmelCase_ : Optional[int] = torch.manual_seed(lowerCAmelCase__ )
        else:
            lowerCAmelCase_ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
        lowerCAmelCase_ : str = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
        '''Smoke-test a 2-step run: rgb/depth output shapes and reference slices must match.'''
        lowerCAmelCase_ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase_ : List[str] = self.get_dummy_components()
        lowerCAmelCase_ : Union[str, Any] = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ )
        lowerCAmelCase_ : List[Any] = ldmad_pipe.to(lowerCAmelCase__ )
        ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        lowerCAmelCase_ : Any = self.get_dummy_inputs(lowerCAmelCase__ )
        lowerCAmelCase_ : Union[str, Any] = ldmad_pipe(**lowerCAmelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ : Any = output.rgb, output.depth
        lowerCAmelCase_ : Dict = rgb[0, -3:, -3:, -1]
        lowerCAmelCase_ : Tuple = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        lowerCAmelCase_ : Optional[Any] = np.array(
            [0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] )
        lowerCAmelCase_ : Tuple = np.array([103.46_727, 85.812_004, 87.849_236] )
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2

    def UpperCAmelCase_ ( self : int ) -> Optional[int]:
        '''Prompt string and precomputed prompt_embeds must produce (near-)identical outputs.'''
        lowerCAmelCase_ : Dict = self.get_dummy_components()
        lowerCAmelCase_ : List[str] = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ )
        lowerCAmelCase_ : List[Any] = ldmad_pipe.to(lowerCAmelCase__ )
        ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        lowerCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
        lowerCAmelCase_ : str = 3 * [inputs["prompt"]]
        # forward
        lowerCAmelCase_ : Union[str, Any] = ldmad_pipe(**lowerCAmelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = output.rgb, output.depth
        lowerCAmelCase_ : str = rgb_slice_a[0, -3:, -3:, -1]
        lowerCAmelCase_ : List[str] = depth_slice_a[0, -3:, -1]
        lowerCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
        lowerCAmelCase_ : Tuple = 3 * [inputs.pop("prompt" )]
        lowerCAmelCase_ : str = ldmad_pipe.tokenizer(
            lowerCAmelCase__ ,padding="max_length" ,max_length=ldmad_pipe.tokenizer.model_max_length ,truncation=lowerCAmelCase__ ,return_tensors="pt" ,)
        lowerCAmelCase_ : Union[str, Any] = text_inputs["input_ids"].to(lowerCAmelCase__ )
        lowerCAmelCase_ : Optional[int] = ldmad_pipe.text_encoder(lowerCAmelCase__ )[0]
        lowerCAmelCase_ : Optional[int] = prompt_embeds
        # forward
        lowerCAmelCase_ : str = ldmad_pipe(**lowerCAmelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ : str = output.rgb, output.depth
        lowerCAmelCase_ : Optional[Any] = rgb_slice_a[0, -3:, -3:, -1]
        lowerCAmelCase_ : Tuple = depth_slice_a[0, -3:, -1]
        assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
        assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
        '''Negative-prompt run with a PNDM scheduler: shapes and reference slices must match.'''
        lowerCAmelCase_ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase_ : Optional[int] = self.get_dummy_components()
        lowerCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
        lowerCAmelCase_ : Union[str, Any] = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ )
        lowerCAmelCase_ : Any = ldmad_pipe.to(lowerCAmelCase__ )
        ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        lowerCAmelCase_ : List[str] = self.get_dummy_inputs(lowerCAmelCase__ )
        lowerCAmelCase_ : List[Any] = "french fries"
        lowerCAmelCase_ : Optional[int] = ldmad_pipe(**lowerCAmelCase__ ,negative_prompt=lowerCAmelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = output.rgb, output.depth
        lowerCAmelCase_ : Any = rgb[0, -3:, -3:, -1]
        lowerCAmelCase_ : Tuple = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        lowerCAmelCase_ : int = np.array(
            [0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] )
        lowerCAmelCase_ : Union[str, Any] = np.array([107.84_738, 84.62_802, 89.962_135] )
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    """Slow GPU integration tests against the pretrained ``Intel/ldm3d`` checkpoint.

    NOTE(review): method names are machine-mangled to ``UpperCAmelCase_``, so
    later defs overwrite earlier ones and the first method — presumably
    ``tearDown`` upstream — is never invoked by unittest; method bodies also
    read names the mangled assignments no longer bind.  Verify upstream.
    """

    def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
        '''Release GPU memory between tests (upstream: ``tearDown``).'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Dict="cpu" ,lowerCAmelCase__ : Union[str, Any]=torch.floataa ,lowerCAmelCase__ : List[str]=0 ) -> int:
        '''Deterministic inputs: fixed latents from a seeded NumPy RNG plus a seeded torch generator.'''
        lowerCAmelCase_ : Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
        lowerCAmelCase_ : List[str] = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 64, 64) )
        lowerCAmelCase_ : Optional[Any] = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ ,dtype=lowerCAmelCase__ )
        lowerCAmelCase_ : Union[str, Any] = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
        '''3-step run of Intel/ldm3d: check 512x512 output shapes and reference slices.'''
        lowerCAmelCase_ : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
        lowerCAmelCase_ : List[str] = ldmad_pipe.to(lowerCAmelCase__ )
        ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        lowerCAmelCase_ : Dict = self.get_inputs(lowerCAmelCase__ )
        lowerCAmelCase_ : List[str] = ldmad_pipe(**lowerCAmelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ : Dict = output.rgb, output.depth
        lowerCAmelCase_ : List[str] = rgb[0, -3:, -3:, -1].flatten()
        lowerCAmelCase_ : Optional[int] = rgb[0, -3:, -1].flatten()
        assert rgb.shape == (1, 5_12, 5_12, 3)
        assert depth.shape == (1, 5_12, 5_12)
        lowerCAmelCase_ : int = np.array(
            [0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] )
        lowerCAmelCase_ : Optional[Any] = np.array(
            [0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] )
        assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    """Nightly GPU statistics tests for ``Intel/ldm3d`` and ``Intel/ldm3d-4c``.

    NOTE(review): method names are machine-mangled to ``UpperCAmelCase_``
    (later defs overwrite earlier ones; the memory-cleanup method — presumably
    ``tearDown`` — will not be auto-invoked), and bodies read names the mangled
    assignments no longer bind.  Verify upstream.
    """

    def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
        '''Release GPU memory between tests (upstream: ``tearDown``).'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Dict="cpu" ,lowerCAmelCase__ : List[str]=torch.floataa ,lowerCAmelCase__ : Optional[int]=0 ) -> int:
        '''Deterministic 50-step inputs: fixed latents from a seeded NumPy RNG plus a seeded generator.'''
        lowerCAmelCase_ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
        lowerCAmelCase_ : Tuple = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 64, 64) )
        lowerCAmelCase_ : Any = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ ,dtype=lowerCAmelCase__ )
        lowerCAmelCase_ : int = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def UpperCAmelCase_ ( self : Dict ) -> int:
        '''Full 50-step run of Intel/ldm3d: rgb/depth mean and std must match references.'''
        lowerCAmelCase_ : List[Any] = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(lowerCAmelCase__ )
        ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        lowerCAmelCase_ : Union[str, Any] = self.get_inputs(lowerCAmelCase__ )
        lowerCAmelCase_ : Union[str, Any] = ldmad_pipe(**lowerCAmelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ : Any = output.rgb, output.depth
        lowerCAmelCase_ : Dict = 0.495_586
        lowerCAmelCase_ : Optional[Any] = 0.33_795_515
        lowerCAmelCase_ : Any = 112.48_518
        lowerCAmelCase_ : List[Any] = 98.489_746
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
        assert np.abs(expected_depth_std - depth.std() ) < 1e-3

    def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
        '''Full 50-step run of the 4-channel Intel/ldm3d-4c: shapes plus mean/std statistics.'''
        lowerCAmelCase_ : int = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(lowerCAmelCase__ )
        ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        lowerCAmelCase_ : str = self.get_inputs(lowerCAmelCase__ )
        lowerCAmelCase_ : Tuple = ldmad_pipe(**lowerCAmelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ : Tuple = output.rgb, output.depth
        lowerCAmelCase_ : List[str] = 0.4_194_127
        lowerCAmelCase_ : List[str] = 0.35_375_586
        lowerCAmelCase_ : str = 0.5_638_502
        lowerCAmelCase_ : Optional[Any] = 0.34_686_103
        assert rgb.shape == (1, 5_12, 5_12, 3)
        assert depth.shape == (1, 5_12, 5_12, 1)
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
        assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 659 | 0 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
A_ : Dict = logging.get_logger(__name__)
class __snake_case ( __SCREAMING_SNAKE_CASE ):
    '''Feature extractor turning raw mono audio into padded log-mel spectrogram
    features (``audio_values``) with an optional patch-level ``audio_mask``.

    NOTE(review): identifiers are machine-mangled — the base class and every
    ``__init__``/``__call__`` parameter are all bound to
    ``__SCREAMING_SNAKE_CASE`` (the repeated parameter names are a SyntaxError
    as written), and the bodies read names (``spectrogram_length``,
    ``raw_speech``, ``sampling_rate``, ``return_attention_mask`` ...) that no
    longer match any binding.  Presumably upstream this is a
    SequenceFeatureExtractor subclass — verify before use.
    '''

    lowerCamelCase__ = ['''audio_values''', '''audio_mask''']

    def __init__( self , __SCREAMING_SNAKE_CASE=2_0_4_8 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=[1_6, 1_6] , __SCREAMING_SNAKE_CASE=1_2_8 , __SCREAMING_SNAKE_CASE=4_4_1_0_0 , __SCREAMING_SNAKE_CASE=8_6 , __SCREAMING_SNAKE_CASE=2_0_4_8 , __SCREAMING_SNAKE_CASE=0.0 , **__SCREAMING_SNAKE_CASE , ):
        # Store spectrogram geometry and precompute the slaney-scale mel filter
        # bank reused by every spectrogram extraction below.
        super().__init__(
            feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
        snake_case__ : Tuple = spectrogram_length
        snake_case__ : List[Any] = num_channels
        snake_case__ : List[str] = patch_size
        # number of patches along the frequency axis
        snake_case__ : Dict = feature_size // self.patch_size[1]
        snake_case__ : Optional[Any] = n_fft
        snake_case__ : Optional[int] = sampling_rate // hop_length_to_sampling_rate
        snake_case__ : Optional[int] = sampling_rate
        snake_case__ : str = padding_value
        snake_case__ : Tuple = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm="""slaney""" , mel_scale="""slaney""" , ).T

    def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
        # Compute a dB-scaled log-mel spectrogram, drop the last frame, clamp
        # to an 80 dB range and rescale into [-1, 1].
        snake_case__ : Dict = spectrogram(
            __SCREAMING_SNAKE_CASE , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
        snake_case__ : List[str] = log_spec[:, :-1]
        snake_case__ : Optional[Any] = log_spec - 20.0
        snake_case__ : List[str] = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec

    def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , **__SCREAMING_SNAKE_CASE , ):
        # Convert raw mono audio (single example or batch; list or ndarray)
        # into padded log-mel features, optionally with a patch attention mask.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    """This feature extractor is set to support sampling rate"""
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        # Normalize the many accepted input layouts into a batched list of
        # float32 arrays.
        snake_case__ : List[str] = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
        snake_case__ : int = is_batched_numpy or (
            isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            snake_case__ : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ):
            snake_case__ : Optional[int] = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa )
        elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            snake_case__ : int = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            snake_case__ : Union[str, Any] = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        snake_case__ : str = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , __SCREAMING_SNAKE_CASE ):
            snake_case__ : List[Any] = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        snake_case__ : Union[str, Any] = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
        if return_attention_mask:
            # 1 for real patches, 0 for padding patches.
            snake_case__ : int = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            snake_case__ : List[str] = np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa )
        # convert into correct format for padding
        snake_case__ : Tuple = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        snake_case__ : Dict = np.ones([len(__SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        snake_case__ : int = padded_audio_features * self.padding_value
        for i in range(len(__SCREAMING_SNAKE_CASE ) ):
            snake_case__ : str = audio_features[i]
            snake_case__ : Tuple = feature
        # return as BatchFeature
        if return_attention_mask:
            snake_case__ : int = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
        else:
            snake_case__ : Any = {"""audio_values""": padded_audio_features}
        snake_case__ : Union[str, Any] = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
        return encoded_inputs
| 38 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
_lowercase = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def UpperCamelCase ( snake_case__):
    # Convert an original segment-anything state dict to transformers' SAM key
    # names: drop the normalization buffers, apply KEYS_TO_MODIFY_MAPPING
    # substring renames, and remap output_hypernetworks_mlps layer indices
    # (layers.0 -> proj_in, layers.1 -> layers.0, layers.2 -> proj_out).
    #
    # NOTE(review): local names are machine-mangled — `state_dict`, the regex
    # pattern, `layer_nb`, and the final `model_state_dict` reads do not match
    # the `lowerCAmelCase_` assignment targets, and several call arguments are
    # the undefined `snake_case__`; confirm against the upstream conversion
    # script before running.
    lowerCAmelCase_ : int = {}
    state_dict.pop("pixel_mean" , snake_case__)
    state_dict.pop("pixel_std" , snake_case__)
    lowerCAmelCase_ : List[Any] = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                lowerCAmelCase_ : Dict = key.replace(snake_case__ , snake_case__)
        if re.match(snake_case__ , snake_case__):
            lowerCAmelCase_ : Any = int(re.match(snake_case__ , snake_case__).group(2))
            if layer_nb == 0:
                lowerCAmelCase_ : List[Any] = key.replace("layers.0" , "proj_in")
            elif layer_nb == 1:
                lowerCAmelCase_ : List[Any] = key.replace("layers.1" , "layers.0")
            elif layer_nb == 2:
                lowerCAmelCase_ : int = key.replace("layers.2" , "proj_out")
        lowerCAmelCase_ : int = value
    # Tie the prompt encoder's positional embedding to the shared embedding.
    lowerCAmelCase_ : Optional[int] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def UpperCamelCase ( model_name , pytorch_dump_folder , push_to_hub , model_hub_id="ybelkada/segment-anything"):
    """Convert an original SAM checkpoint to a HF SamModel and sanity-check outputs.

    Args:
        model_name: original checkpoint name (e.g. ``sam_vit_h_4b8939``).
        pytorch_dump_folder: output folder for the converted model (unused here; kept for CLI compat).
        push_to_hub: whether the caller intends to push to the Hub (kept for CLI compat).
        model_hub_id: Hub repo hosting the original ``checkpoints/{model_name}.pth`` files.

    NOTE(review): requires a CUDA device and network access; the numeric
    assertions are only exercised for ``sam_vit_h_4b8939``.
    """
    checkpoint_path = hf_hub_download(model_hub_id, F'''checkpoints/{model_name}.pth''')
    # Pick the vision config matching the checkpoint size.
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], )
        config = SamConfig(
            vision_config=vision_config, )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], )
        config = SamConfig(
            vision_config=vision_config, )
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")
    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    # Forward pass without prompts first.
    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    # Reference values were recorded for the ViT-H checkpoint only.
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_890_251_159_668
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_712_603_092_193_604
        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8_686_015_605_926_514
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
    # CLI entry point for the SAM checkpoint conversion.
    parser = argparse.ArgumentParser()
    # Checkpoints this script knows how to convert.
    choices = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
    parser.add_argument(
        '''--model_name''',
        default='''sam_vit_h_4b8939''',
        choices=choices,
        type=str,
        help='''Name of the original SAM checkpoint to convert.''',
    )
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether to push the model and processor to the hub after converting''',
    )
    # NOTE: no `choices` here — the hub id is a repo name, not a model name;
    # restricting it to the model-name list would reject every valid value.
    parser.add_argument(
        '''--model_hub_id''',
        default='''ybelkada/segment-anything''',
        type=str,
        help='''Hub repo id hosting the original SAM checkpoints.''',
    )
    args = parser.parse_args()
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 659 | 0 |
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
# Canonical file names used when saving/loading checkpoints and configs.
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
    """Raise ImportError if the installed transformers is older than the given minimum version.

    Args:
        SCREAMING_SNAKE_CASE__: minimum required version string (a "dev" version means
            a source install is required).

    Raises:
        ImportError: when ``__version__`` is older than the minimum.
    """
    min_version = SCREAMING_SNAKE_CASE__
    # BUG FIX: the original compared the argument against itself, which is
    # always False; compare the installed __version__ to the required minimum.
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                '''This example requires a source install from HuggingFace Transformers (see '''
                '''`https://huggingface.co/docs/transformers/installation#install-from-source`),'''
            )
        else:
            error_message = F'''This example requires a minimum version of {min_version},'''
        error_message += F''' but the version found is {__version__}.\n'''
        raise ImportError(
            error_message
            + '''Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '''
            '''versions of HuggingFace Transformers.''' )
class __snake_case :
    """A node of a radix (compressed prefix) tree.

    Each edge stores a whole string prefix rather than a single character;
    ``is_leaf`` marks that the concatenation of prefixes down to this node
    spells a stored word.
    """

    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Maps the first character of each child's prefix to the child node.
        self.nodes = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple:
        """Split ``word`` against this node's prefix.

        Returns:
            (common part, rest of ``self.prefix``, rest of ``word``).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words) -> None:
        """Insert every word of ``words`` into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word below this node."""
        # Case 1: the word equals this node's prefix -> mark it stored.
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: no child shares the word's first character
        # Solution: create an edge to a brand-new node holding the word.
        # (type(self) is used instead of the class name to avoid the
        # private-name mangling a literal `__snake_case` would get here.)
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = type(self)(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: the child's prefix is fully matched -> recurse into it.
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: partial match -> split the edge with an intermediate
            # node and hang both remainders below it.
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = type(self)(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True iff ``word`` is stored in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        # Leftover child prefix means the word cannot be in the tree.
        if remaining_prefix != "":
            return False
        # Word fully consumed: present iff the child is a leaf.
        if remaining_word == "":
            return incoming_node.is_leaf
        return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Remove ``word`` from the tree; return True if it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        # Leftover child prefix means the word is not stored here.
        if remaining_prefix != "":
            return False
        # Word not yet consumed: keep descending.
        if remaining_word != "":
            return incoming_node.delete(remaining_word)
        # Not a leaf: nothing to delete.
        if not incoming_node.is_leaf:
            return False
        if len(incoming_node.nodes) == 0:
            # No children: drop the edge entirely.
            del self.nodes[word[0]]
            # Merge this node with its only remaining child when possible.
            if len(self.nodes) == 1 and not self.is_leaf:
                merging_node = list(self.nodes.values())[0]
                self.is_leaf = merging_node.is_leaf
                self.prefix += merging_node.prefix
                self.nodes = merging_node.nodes
        elif len(incoming_node.nodes) > 1:
            # Other words hang below: just unmark the leaf flag.
            incoming_node.is_leaf = False
        else:
            # Exactly one child: merge the edge with it.
            merging_node = list(incoming_node.nodes.values())[0]
            incoming_node.is_leaf = merging_node.is_leaf
            incoming_node.prefix += merging_node.prefix
            incoming_node.nodes = merging_node.nodes
        return True

    def print_tree(self, height: int = 0) -> None:
        """Pretty-print the subtree, one dash per level of depth."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def UpperCamelCase ( ):
    """Smoke-test insert/find/delete on a small word set; return True on success.

    NOTE(review): relies on a module-level ``RadixNode`` name — confirm the
    radix-tree class is exported under that name in this module.
    """
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    # Every inserted word must be found; near-misses must not.
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    # Deleting a word must not remove words that extend it.
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def UpperCamelCase ( ):
    """Pytest hook: run the radix-tree smoke test and assert it passes."""
    assert test_trie()
def UpperCamelCase ( ):
    """Build a demo radix tree from a fixed word list and print its structure.

    NOTE(review): relies on a module-level ``RadixNode`` name — confirm the
    radix-tree class is exported under that name in this module.
    """
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
# Script entry point: build and print a demo radix tree.
if __name__ == "__main__":
    main()
| 659 | 0 |
from typing import TYPE_CHECKING

from ....utils import _LazyModule

# Import structure consumed by _LazyModule: submodule -> exported names.
_import_structure = {'''tokenization_tapex''': ['''TapexTokenizer''']}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 40 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
    """Helper that builds a small BlipText config plus dummy inputs for the TF tests."""

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Create (config, input_ids, attention_mask) with a left-aligned random mask."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # Force each row's mask to be 1s followed by 0s at a random cut point.
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        """Return a BlipTextConfig matching the tester's hyper-parameters."""
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, )

    def create_and_check_model(self, config, input_ids, input_mask):
        """Run the model with and without the attention mask and check output shapes."""
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common test mixin: return (config, inputs_dict)."""
        config, input_ids, input_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class __snake_case ( TFModelTesterMixin , unittest.TestCase ):
    """Model tests for TFBlipTextModel."""

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    fx_compatible = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        # Training is not supported for this standalone text model.
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        # BLIP TF/PT weights may legitimately differ in key coverage.
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 659 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL for pretrained BiT models.
BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''',
}
class lowercase_ (BackboneConfigMixin , PretrainedConfig ):
    r"""Configuration class for the BiT (Big Transfer) vision model.

    Stores the architecture hyper-parameters (channel sizes, stage depths,
    layer type, padding strategy, ...) and validates the layer/padding choices.
    """

    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
        if global_padding is not None:
            # Padding strategy is case-insensitive but stored upper-cased.
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(F"Padding strategy {global_padding} not supported" )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        # One named stage per depth entry, preceded by the stem.
        self.stage_names = ['''stem'''] + [F"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 41 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
    },
}

# Default maximum input length for each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''allenai/led-base-16384''': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def UpperCamelCase ( ):
    """Return a mapping from byte values (0-255) to printable unicode characters.

    Printable bytes map to themselves; every other byte is shifted into the
    codepoints above 255 so all 256 bytes get a visible, reversible symbol
    for byte-level BPE.
    """
    # Bytes whose characters are already printable and safe to keep as-is.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)  # shift into unused codepoints above 255
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def UpperCamelCase ( snake_case__):
    """Return the set of adjacent symbol pairs in a word.

    Args:
        snake_case__: a sequence of symbols (e.g. a tuple of variable-length strings).

    Returns:
        A set of (left, right) tuples for every consecutive symbol pair,
        as consumed by the BPE merge loop.
    """
    pairs = set()
    prev_char = snake_case__[0]
    for char in snake_case__[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : int ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Any ,lowerCAmelCase__ : Tuple="replace" ,lowerCAmelCase__ : Optional[int]="<s>" ,lowerCAmelCase__ : Optional[int]="</s>" ,lowerCAmelCase__ : Tuple="</s>" ,lowerCAmelCase__ : int="<s>" ,lowerCAmelCase__ : Union[str, Any]="<unk>" ,lowerCAmelCase__ : str="<pad>" ,lowerCAmelCase__ : Tuple="<mask>" ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : Tuple ,) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else bos_token
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else eos_token
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else sep_token
lowerCAmelCase_ : Any = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else cls_token
lowerCAmelCase_ : Tuple = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else unk_token
lowerCAmelCase_ : Any = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ : Optional[int] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
with open(lowerCAmelCase__ ,encoding="utf-8" ) as vocab_handle:
lowerCAmelCase_ : List[str] = json.load(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
lowerCAmelCase_ : Optional[int] = errors # how to handle errors in decoding
lowerCAmelCase_ : Optional[int] = bytes_to_unicode()
lowerCAmelCase_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ ,encoding="utf-8" ) as merges_handle:
lowerCAmelCase_ : List[str] = merges_handle.read().split("\n" )[1:-1]
lowerCAmelCase_ : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase_ : Union[str, Any] = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Dict = {}
lowerCAmelCase_ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase_ : Any = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def UpperCAmelCase_ ( self : Dict ) -> Dict:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase_ ( self : Dict ) -> str:
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Dict ) -> Dict:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCAmelCase_ : Union[str, Any] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : str = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
lowerCAmelCase_ : Optional[int] = min(lowerCAmelCase__ ,key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ ,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = bigram
lowerCAmelCase_ : Tuple = []
lowerCAmelCase_ : str = 0
while i < len(lowerCAmelCase__ ):
try:
lowerCAmelCase_ : Union[str, Any] = word.index(lowerCAmelCase__ ,lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase_ : List[str] = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase_ : Optional[int] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
lowerCAmelCase_ : Dict = get_pairs(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = " ".join(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = word
return word
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Any = []
for token in re.findall(self.pat ,lowerCAmelCase__ ):
lowerCAmelCase_ : Optional[int] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(" " ) )
return bpe_tokens
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase__ ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = "".join(lowerCAmelCase__ )
lowerCAmelCase_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" ,errors=self.errors )
return text
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase_ : Optional[int] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : List[str] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCAmelCase__ ,ensure_ascii=lowerCAmelCase__ ) + "\n" )
lowerCAmelCase_ : Dict = 0
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowerCAmelCase_ : List[Any] = token_index
writer.write(" ".join(lowerCAmelCase__ ) + "\n" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase_ ( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ) -> List[int]:
    """Build model inputs by adding special tokens: `<s> A </s>` or `<s> A </s></s> B </s>`.

    Fix: the original declared both parameters with the same name (a SyntaxError)
    and referenced an undefined `token_ids_a`; distinct names are restored.
    """
    if token_ids_1 is None:
        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def UpperCAmelCase_ ( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ,already_has_special_tokens: bool = False ) -> List[int]:
    """Return a mask with 1 at special-token positions and 0 at sequence-token positions.

    Fix: the original declared duplicate parameter names (a SyntaxError);
    distinct names are restored from the body's references.
    """
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
        )
    if token_ids_1 is None:
        return [1] + ([0] * len(token_ids_0)) + [1]
    return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def UpperCAmelCase_ ( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ) -> List[int]:
    """Return all-zero token-type ids sized to the full sequence incl. special tokens.

    Fix: duplicate parameter names (a SyntaxError) and the undefined
    `token_ids_a` reference are replaced by distinct, real names.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : str ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = kwargs.pop("add_prefix_space" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
lowerCAmelCase_ : List[str] = " " + text
return (text, kwargs)
def UpperCAmelCase_ ( self ,encoded_inputs ,max_length: Optional[int] = None ,padding_strategy=PaddingStrategy.DO_NOT_PAD ,pad_to_multiple_of: Optional[int] = None ,return_attention_mask: Optional[bool] = None ,) -> dict:
    """Pad `encoded_inputs`, additionally padding `global_attention_mask` to match.

    Fix: the original declared five identically-named parameters (a
    SyntaxError) while the body referenced the real names; those are restored.
    """
    encoded_inputs = super()._pad(
        encoded_inputs=encoded_inputs,
        max_length=max_length,
        padding_strategy=padding_strategy,
        pad_to_multiple_of=pad_to_multiple_of,
        return_attention_mask=return_attention_mask,
    )
    # Load from model defaults
    if return_attention_mask is None:
        return_attention_mask = "attention_mask" in self.model_input_names
    if return_attention_mask and "global_attention_mask" in encoded_inputs:
        required_input = encoded_inputs[self.model_input_names[0]]
        # `global_attention_mask` need to have the same length as other (sequential) inputs.
        needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
        if needs_to_be_padded:
            difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
            if self.padding_side == "right":
                # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                encoded_inputs["global_attention_mask"] = (
                    encoded_inputs["global_attention_mask"] + [-1] * difference
                )
            elif self.padding_side == "left":
                encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                    "global_attention_mask"
                ]
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))
    return encoded_inputs
| 659 | 0 |
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def _UpperCamelCase ( *__UpperCamelCase ) -> Dict:
with open(__UpperCamelCase ,'r' ) as fh:
fcntl.flock(__UpperCamelCase ,fcntl.LOCK_EX )
try:
print(*__UpperCamelCase )
finally:
fcntl.flock(__UpperCamelCase ,fcntl.LOCK_UN )
# Fix: every assignment target had been collapsed to `A_` while the statements
# below read `local_rank`, `device`, `hostname`, `gpu`, `rank`, `world_size`;
# the real names are restored. Requires CUDA + a torch.distributed launcher.
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f'''[{hostname}-{local_rank}]'''
try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
    dist.barrier()

    if rank == 0:
        printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
    printflock(f'''{gpu} is broken''')
    raise
| 42 |
import os
_lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def UpperCamelCase ( snake_case__ ,symbols=None):
    """Parse a Roman numeral string into its integer value.

    `symbols` maps single Roman characters to their values; it defaults to the
    module-level SYMBOLS table (parameterized here so the parser can be used
    with alternative tables and no longer hard-depends on the global).
    """
    if symbols is None:
        symbols = SYMBOLS
    total_value = 0
    index = 0
    # Subtractive notation: a smaller value before a larger one is subtracted.
    while index < len(snake_case__) - 1:
        current_value = symbols[snake_case__[index]]
        next_value = symbols[snake_case__[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += symbols[snake_case__[index]]
    return total_value


parse_roman_numerals = UpperCamelCase  # restore the name used by the solution code below
def UpperCamelCase ( snake_case__):
    """Convert a non-negative integer into its minimal Roman numeral.

    Rewritten as the standard greedy table walk; output is identical to the
    previous hand-unrolled M/CM/D/... cascade for every non-negative input.
    Also restores the `generate_roman_numerals` name the solution code calls.
    """
    conversions = (
        (1000, "M"),
        (900, "CM"),
        (500, "D"),
        (400, "CD"),
        (100, "C"),
        (90, "XC"),
        (50, "L"),
        (40, "XL"),
        (10, "X"),
        (9, "IX"),
        (5, "V"),
        (4, "IV"),
        (1, "I"),
    )
    remaining = snake_case__
    parts = []
    for value, symbol in conversions:
        count, remaining = divmod(remaining, value)
        parts.append(symbol * count)
    return "".join(parts)


generate_roman_numerals = UpperCamelCase  # restore the name used by the solution code below
def UpperCamelCase ( snake_case__ = "/p089_roman.txt"):
    """Project Euler 89: total characters saved by rewriting each numeral minimally.

    Reads one Roman numeral per line from the file located next to this module
    and sums len(original) - len(minimal) over all lines.  Fix: the body
    referenced undefined names (`roman_numerals_filename`, loop variables) and
    took `dirname` of the filename argument instead of this module's path.
    """
    savings = 0
    with open(os.path.dirname(__file__) + snake_case__) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        value = parse_roman_numerals(original)
        minimal = generate_roman_numerals(value)
        savings += len(original) - len(minimal)
    return savings


solution = UpperCamelCase  # restore the name used by the __main__ guard below
if __name__ == "__main__":
print(f"{solution() = }")
| 659 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
    """GPU integration tests for StableDiffusionKDiffusionPipeline.

    Fixes vs. the previous version: every local assignment target had been
    collapsed to `lowercase__` while the bodies read `sd_pipe`, `prompt`,
    `generator`, `output`, `image`, `image_slice`, `expected_slice`; and the
    `.to()` / `disable=` arguments referenced an undefined name (restored to
    `torch_device` / None).
    NOTE(review): the method names below are obfuscated (`lowerCamelCase_`),
    so the first one is not auto-invoked as `tearDown` and the others are not
    discovered as `test_*` — confirm the intended names upstream.
    """

    def lowerCamelCase_ ( self: Any ) -> Any:
        """Release GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCamelCase_ ( self: int ) -> Any:
        """sd-v1-4 + sample_euler reproduces the reference output slice."""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('''sample_euler''' )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def lowerCamelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
        """sd-2-1-base + sample_euler reproduces the reference output slice."""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('''sample_euler''' )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1

    def lowerCamelCase_ ( self: List[str] ) -> List[Any]:
        """sd-2-1-base + sample_dpmpp_2m with Karras sigmas reproduces the slice."""
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=True , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 43 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCamelCase ( ):
    """CLI entry point: parse TensorFlowBenchmarkArguments and run the benchmark.

    Deprecated `--no_*` flags get a targeted error pointing at `--no-*`.
    Fix: the body referenced undefined names (`snake_case__` in a zero-arg
    function, collapsed locals); the real names are restored.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval() on the argparse error tail is a pre-existing
        # pattern; it only sees locally produced error text — confirm acceptable.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


main = UpperCamelCase  # restore the name used by the __main__ guard below
if __name__ == "__main__":
main()
| 659 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def A_ ( _lowerCAmelCase ):
    """Strip fairseq bookkeeping entries from a checkpoint state dict, in place.

    Fix: the loop previously called `state_dict.pop(state_dict, state_dict)`,
    i.e. it never removed any key; each ignore key is now popped (missing
    keys are tolerated via the None default).
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        _lowerCAmelCase.pop(k, None)


remove_ignore_keys_ = A_  # restore the name the converter below calls
def A_ ( _lowerCAmelCase ):
    """Build a bias-free Linear layer sharing an embedding's weight tensor.

    Fix: `bias=` was previously passed the embedding object (always truthy);
    the lm-head built from an embedding must have no bias.
    """
    vocab_size, emb_size = _lowerCAmelCase.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = _lowerCAmelCase.weight.data
    return lin_layer


make_linear_from_emb = A_  # restore the name the converter below calls
def A_ ( _lowerCAmelCase ):
    """Load a fairseq M2M-100 checkpoint and convert it to MaMaaaForConditionalGeneration.

    Fixes vs. the previous version: assignment targets had been collapsed
    (`args`, `state_dict`, `vocab_size`, `config`, `model` were read but never
    bound), the shared-embedding weight assignment lost its target, and
    `strict=` was passed the state dict instead of False.
    """
    mam_aaa = torch.load(_lowerCAmelCase, map_location="cpu")
    args = mam_aaa["args"] or mam_aaa["cfg"]["model"]
    state_dict = mam_aaa["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", )
    # The decoder embedding is shared with the encoder/lm-head in M2M-100.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


convert_fairseq_mamaaa_checkpoint_from_disk = A_  # restore the name the __main__ guard calls
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    # Fix: the attribute access previously read `args.fairseq_pathß` (garbled
    # trailing character) and the converted model was never bound to `model`.
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_lowercase = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
_lowercase = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def UpperCamelCase ( ):
    """Disaggregated ROUGE: rouge2 means must agree whether or not rougeL is also requested."""
    # NOTE(review): every `snake_case__` below is undefined in this zero-argument
    # function, and the results are read back via `no_aggregation` /
    # `no_aggregation_just_ra` which are never bound (obfuscation damage) —
    # calling this raises NameError. The intended arguments are presumably the
    # module-level prediction/target lists with bootstrap_aggregation=False.
    # TODO confirm against the upstream test file.
    lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , bootstrap_aggregation=snake_case__ , rouge_keys=["rouge2", "rougeL"])
    assert isinstance(snake_case__ , snake_case__)
    lowerCAmelCase_ : str = calculate_rouge(snake_case__ , snake_case__ , bootstrap_aggregation=snake_case__ , rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"]).fmeasure.mean()
    )
def UpperCamelCase ( ):
    """rougeLsum computed with newline separation should exceed the no-separator score."""
    # NOTE(review): `snake_case__`, `k`, `score` and `score_no_sep` are undefined
    # here (assignment targets were collapsed to `lowerCAmelCase_`); this raises
    # NameError when called. TODO confirm the intended bindings upstream.
    lowerCAmelCase_ : str = "rougeLsum"
    lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=[k])[k]
    lowerCAmelCase_ : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=[k])[k]
    assert score > score_no_sep
def UpperCamelCase ( ):
    """rouge1/rouge2/rougeL must be insensitive to the newline_sep flag."""
    # NOTE(review): `snake_case__`, `score_sep` and `score_no_sep` are undefined
    # (collapsed assignment targets); this raises NameError when called.
    lowerCAmelCase_ : int = ["rouge1", "rouge2", "rougeL"]
    lowerCAmelCase_ : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=snake_case__)
    lowerCAmelCase_ : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=snake_case__)
    assert score_sep == score_no_sep
def UpperCamelCase ( ):
    """Single-sentence hypotheses: scores must match with and without newline separation."""
    # NOTE(review): the final assert passes `snake_case__` (undefined) instead of
    # the two local lists built below, whose assignment targets were collapsed
    # to `lowerCAmelCase_` — this raises NameError when called. TODO confirm.
    lowerCAmelCase_ : List[str] = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    lowerCAmelCase_ : Dict = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__) == calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__)
def UpperCamelCase ( ):
    """rougeLsum with sentence-newline preprocessing should beat the raw variant."""
    # NOTE(review): the calls pass `snake_case__` (undefined) instead of the two
    # local lists, and the comparison reads `new_score`/`prev_score` which are
    # never bound — NameError when called. TODO confirm intended bindings.
    lowerCAmelCase_ : Optional[int] = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    lowerCAmelCase_ : Any = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , rouge_keys=["rougeLsum"] , newline_sep=snake_case__)["rougeLsum"]
    lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def UpperCamelCase ( ):
    """calculate_rouge_path on the WMT en-ro test fixtures returns dict-like scores."""
    # NOTE(review): `data_dir` is read below but the Path was bound to
    # `lowerCAmelCase_`, and the isinstance checks pass `snake_case__`
    # (undefined) — NameError when called. TODO confirm intended bindings.
    lowerCAmelCase_ : int = Path("examples/seq2seq/test_data/wmt_en_ro")
    lowerCAmelCase_ : Dict = calculate_rouge_path(data_dir.joinpath("test.source") , data_dir.joinpath("test.target"))
    assert isinstance(snake_case__ , snake_case__)
    lowerCAmelCase_ : Any = calculate_rouge_path(
        data_dir.joinpath("test.source") , data_dir.joinpath("test.target") , bootstrap_aggregation=snake_case__)
    assert isinstance(snake_case__ , snake_case__)
| 659 | 0 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def A ( dataset , expected_features ):
    """Shared assertions for a Dataset read from the 4-row / 3-column JSON fixture.

    Fix: the two parameters were declared with the same name (a SyntaxError);
    distinct names are restored from the body's references.
    """
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


_check_json_dataset = A  # restore the name the tests below call
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A ( keep_in_memory , jsonl_path , tmp_path ):
    """Reader test: keep_in_memory toggles Arrow memory growth. Fix: duplicate param names restored."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_json_dataset(dataset , expected_features )
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def A ( features , jsonl_path , tmp_path ):
    """Reader test: an explicit Features schema is honored. Fix: duplicate param names restored."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path , features=features , cache_dir=cache_dir ).read()
    _check_json_dataset(dataset , expected_features )
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ] , )
def A ( features , jsonl_312_path , tmp_path ):
    """Reader test: column order from the file/schema is preserved. Fix: duplicate param names restored."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path , features=features , cache_dir=cache_dir ).read()
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def A ( jsonl_312_path , tmp_path ):
    """Reader test: a schema whose column order differs from the file wins. Fix: duplicate param names restored."""
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path , features=features , cache_dir=cache_dir ).read()
    assert isinstance(dataset , Dataset )
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A ( split , jsonl_path , tmp_path ):
    """Reader test: the split argument is reflected on the dataset. Fix: duplicate param names restored."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path , cache_dir=cache_dir , split=split ).read()
    _check_json_dataset(dataset , expected_features )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def A ( path_type , jsonl_path , tmp_path ):
    """Reader test: a single path and a list of paths both work. Fix: duplicate param names restored."""
    if issubclass(path_type , str ):
        path = jsonl_path
    elif issubclass(path_type , list ):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path , cache_dir=cache_dir ).read()
    _check_json_dataset(dataset , expected_features )
def A ( dataset_dict , expected_features , splits=("train",) ):
    """Shared assertions for a DatasetDict read from the JSON fixture. Fix: duplicate param names restored."""
    assert isinstance(dataset_dict , DatasetDict )
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


_check_json_datasetdict = A  # restore the name the tests below call
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A ( keep_in_memory , jsonl_path , tmp_path ):
    """DatasetDict reader test with keep_in_memory. Fix: duplicate param names restored."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path} , cache_dir=cache_dir , keep_in_memory=keep_in_memory ).read()
    _check_json_datasetdict(dataset , expected_features )
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def A ( features , jsonl_path , tmp_path ):
    """DatasetDict reader test with an explicit schema. Fix: duplicate param names restored."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()} ) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path} , features=features , cache_dir=cache_dir ).read()
    _check_json_datasetdict(dataset , expected_features )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A ( split , jsonl_path , tmp_path ):
    """DatasetDict reader test: split names propagate to each dataset. Fix: duplicate param names restored."""
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path , cache_dir=cache_dir ).read()
    _check_json_datasetdict(dataset , expected_features , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def A ( lowercase__ ):
    """Deserialize a whole JSON document from a file-like buffer."""
    parsed = json.load(lowercase__)
    return parsed
def A ( lowercase__ ):
    """Deserialize one JSON document per line (JSON Lines) from a file-like buffer.

    Fix: the comprehension iterated an undefined `buffer` and parsed the
    buffer object instead of each line; it now parses every line of the
    argument.
    """
    return [json.loads(line) for line in lowercase__]
class lowerCAmelCase_ :
"""simple docstring"""
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def __a ( self :str , lowerCamelCase__ :int , lowerCamelCase__ :int , lowerCamelCase__ :Optional[Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ ).write()
buffer.seek(0 )
UpperCamelCase__ :str = load_json_function(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert isinstance(exported_content[0] , lowerCamelCase__ )
assert len(lowerCamelCase__ ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def __a ( self :Any , lowerCamelCase__ :Any , lowerCamelCase__ :Any , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , orient=lowerCamelCase__ ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[int] = load_json(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowerCamelCase__ ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def __a ( self :Any , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Any ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Union[str, Any] = load_json_function(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert isinstance(exported_content[0] , lowerCamelCase__ )
assert len(lowerCamelCase__ ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def __a ( self :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :str , lowerCamelCase__ :Dict , lowerCamelCase__ :Dict ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCamelCase__ , lowerCamelCase__ , lines=lowerCamelCase__ , orient=lowerCamelCase__ , num_proc=2 ).write()
buffer.seek(0 )
UpperCamelCase__ :Optional[Any] = load_json(lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCamelCase__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowerCamelCase__ ) == 10
def __a ( self :Tuple , dataset :List[Any] ):
    """``num_proc`` must be a positive integer: constructing the writer with
    ``num_proc=0`` raises ``ValueError`` before anything is written.

    Bug fix: the degraded version passed the mangled parameter name itself to
    ``pytest.raises``; the expected exception type is restored.
    """
    with pytest.raises(ValueError ):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset , buffer , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def __a ( self :Union[str, Any] , shared_datadir :Optional[Any] , tmp_path_factory :List[Any] , extension :List[str] , compression :Optional[int] , dataset :Tuple ):
    """Write the dataset with each supported compression codec and compare the
    decompressed bytes against the reference file in ``shared_datadir``.

    Bug fix: the degraded version declared every parameter with one mangled
    name (a SyntaxError) and read undefined ``extension``/``original_content``
    locals; names are restored from the parametrize decorator and fixtures.
    """
    path = tmp_path_factory.mktemp("""data""" ) / f"""test.json.{extension}"""
    original_path = str(shared_datadir / f"""test_file.json.{extension}""" )
    JsonDatasetWriter(dataset , path , compression=compression ).write()
    # compression="infer" decompresses based on the file extension.
    with fsspec.open(path , """rb""" , compression="""infer""" ) as f:
        exported_content = f.read()
    with fsspec.open(original_path , """rb""" , compression="""infer""" ) as f:
        original_content = f.read()
    assert exported_content == original_content
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( snake_case__ , unittest.TestCase ):
    """Unit and integration tests for the LED tokenizer (slow and fast variants).

    NOTE(review): this class has been mechanically name-mangled.  The base name
    ``snake_case__`` is undefined at module level (presumably
    ``TokenizerTesterMixin``, imported above); many locals are bound to
    ``lowerCAmelCase_`` while the code reads other names (``lowerCAmelCase__``,
    ``batch``, ``targets``, ...); and the three class attributes below all
    rebind one name.  Confirm against the upstream file before running.
    """

    # Presumably tokenizer_class / rust_tokenizer_class / test_rust_tokenizer;
    # as written only the last assignment survives.
    UpperCamelCase_ = LEDTokenizer
    UpperCamelCase_ = LEDTokenizerFast
    UpperCamelCase_ = True

    def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
        """Write a tiny byte-level BPE vocab and merges file to tmpdirname."""
        super().setUp()
        # Minimal GPT-2-style vocabulary; "\u0120" is the leading-space marker.
        lowerCAmelCase_ : Union[str, Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        # NOTE(review): ``lowerCAmelCase__`` is unbound here — was the vocab list.
        lowerCAmelCase_ : Tuple = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
        lowerCAmelCase_ : int = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowerCAmelCase_ : Union[str, Any] = {"unk_token": "<unk>"}
        lowerCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
        lowerCAmelCase_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
        # NOTE(review): self.vocab_file / self.merges_file are never assigned above.
        with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
        with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
            fp.write("\n".join(lowerCAmelCase__ ) )

    def UpperCAmelCase_ ( self : List[Any] ,**lowerCAmelCase__ : int ) -> Tuple:
        """Build a slow tokenizer from the temp files, merging special tokens."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )

    def UpperCAmelCase_ ( self : Union[str, Any] ,**lowerCAmelCase__ : Optional[int] ) -> List[Any]:
        """Build a fast (rust) tokenizer from the temp files."""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )

    def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : int ) -> List[str]:
        """Return a (input, expected-output) text pair for common tests."""
        return "lower newer", "lower newer"

    @cached_property
    def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
        """Slow LED tokenizer loaded from the published checkpoint."""
        return LEDTokenizer.from_pretrained("allenai/led-base-16384" )

    @cached_property
    def UpperCAmelCase_ ( self : List[str] ) -> Dict:
        """Fast LED tokenizer loaded from the published checkpoint."""
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )

    @require_torch
    def UpperCAmelCase_ ( self : int ) -> Optional[int]:
        """Batched encoding with padding produces the expected ids and shapes."""
        lowerCAmelCase_ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        lowerCAmelCase_ : int = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : Any = tokenizer(lowerCAmelCase__ ,max_length=len(lowerCAmelCase__ ) ,padding=lowerCAmelCase__ ,return_tensors="pt" )
            self.assertIsInstance(lowerCAmelCase__ ,lowerCAmelCase__ )
            self.assertEqual((2, 9) ,batch.input_ids.shape )
            self.assertEqual((2, 9) ,batch.attention_mask.shape )
            lowerCAmelCase_ : int = batch.input_ids.tolist()[0]
            self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )

    @require_torch
    def UpperCAmelCase_ ( self : Dict ) -> Any:
        """Plain encoding returns input_ids/attention_mask but no label keys."""
        lowerCAmelCase_ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,return_tensors="pt" )
            self.assertIn("input_ids" ,lowerCAmelCase__ )
            self.assertIn("attention_mask" ,lowerCAmelCase__ )
            self.assertNotIn("labels" ,lowerCAmelCase__ )
            self.assertNotIn("decoder_attention_mask" ,lowerCAmelCase__ )

    @require_torch
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
        """Target texts padded to max_length come back with that exact width."""
        lowerCAmelCase_ : int = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : Optional[int] = tokenizer(text_target=lowerCAmelCase__ ,max_length=32 ,padding="max_length" ,return_tensors="pt" )
            self.assertEqual(32 ,targets["input_ids"].shape[1] )

    @require_torch
    def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
        """Over-long inputs are truncated to the 5122-token model maximum."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : Tuple = tokenizer(
                ["I am a small frog" * 10_24, "I am a small frog"] ,padding=lowerCAmelCase__ ,truncation=lowerCAmelCase__ ,return_tensors="pt" )
            self.assertIsInstance(lowerCAmelCase__ ,lowerCAmelCase__ )
            self.assertEqual(batch.input_ids.shape ,(2, 51_22) )

    @require_torch
    def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
        """Both source and target encodings are wrapped in BOS ... EOS."""
        lowerCAmelCase_ : Tuple = ["A long paragraph for summarization."]
        lowerCAmelCase_ : Dict = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ ,return_tensors="pt" )
            lowerCAmelCase_ : Optional[Any] = tokenizer(text_target=lowerCAmelCase__ ,return_tensors="pt" )
            lowerCAmelCase_ : List[str] = inputs["input_ids"]
            lowerCAmelCase_ : Any = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    @require_torch
    def UpperCAmelCase_ ( self : str ) -> Tuple:
        """tokenizer.pad() pads a provided global_attention_mask alongside ids."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : str = ["Summary of the text.", "Another summary."]
            lowerCAmelCase_ : str = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            lowerCAmelCase_ : List[Any] = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ )
            lowerCAmelCase_ : Optional[int] = [[0] * len(lowerCAmelCase__ ) for x in encoded_output["input_ids"]]
            lowerCAmelCase_ : Optional[int] = tokenizer.pad(lowerCAmelCase__ )
            self.assertSequenceEqual(outputs["global_attention_mask"] ,lowerCAmelCase__ )

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
        """Intentionally skipped common test (no-op)."""
        pass

    def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
        """Slow and fast tokenizers agree on mask-token handling and ids."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCAmelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
                lowerCAmelCase_ : Tuple = self.tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
                lowerCAmelCase_ : Dict = "A, <mask> AllenNLP sentence."
                lowerCAmelCase_ : Tuple = tokenizer_r.encode_plus(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,return_token_type_ids=lowerCAmelCase__ )
                lowerCAmelCase_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,return_token_type_ids=lowerCAmelCase__ )
                self.assertEqual(sum(tokens_r["token_type_ids"] ) ,sum(tokens_p["token_type_ids"] ) )
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) ,sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) ,)
                lowerCAmelCase_ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                lowerCAmelCase_ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                self.assertSequenceEqual(tokens_p["input_ids"] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(
                    lowerCAmelCase__ ,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    lowerCAmelCase__ ,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 659 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class A_ ( _a ):
    def _lowercase ( self: List[Any] ):
        """Instantiate the config from ``inputs_dict`` and assert it exposes the
        MobileViT-specific attributes the model code relies on."""
        config = self.config_class(**self.inputs_dict )
        for required_attribute in ("hidden_sizes", "neck_hidden_sizes", "num_attention_heads"):
            self.parent.assertTrue(hasattr(config ,required_attribute ) )
class A_ :
    """Builds small MobileViT configs/inputs and runs per-head model checks.

    NOTE(review): this class has been mechanically name-mangled.  ``__init__``
    declares every parameter with the same name ``__lowerCAmelCase`` (a
    SyntaxError as written), locals rebind ``_lowerCamelCase`` while the bodies
    read the originals (``parent``, ``pixel_values``, ...), and every method is
    named ``_lowercase`` so later defs shadow earlier ones.  The intended names
    (batch_size, image_size, ..., prepare_config_and_inputs, get_config,
    create_and_check_model, ...) can be read off the attribute accesses below;
    confirm against the upstream file before running.
    """

    def __init__( self: List[str] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[Any]=13 ,__lowerCAmelCase: Any=32 ,__lowerCAmelCase: Optional[Any]=2 ,__lowerCAmelCase: Dict=3 ,__lowerCAmelCase: Any=640 ,__lowerCAmelCase: List[str]=4 ,__lowerCAmelCase: Dict="silu" ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: str=32 ,__lowerCAmelCase: Dict=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: Optional[Any]=0.1 ,__lowerCAmelCase: List[str]=0.02 ,__lowerCAmelCase: int=True ,__lowerCAmelCase: int=True ,__lowerCAmelCase: str=10 ,__lowerCAmelCase: List[str]=None ,):
        # NOTE(review): duplicate parameter names -- SyntaxError as written.
        _lowerCamelCase : Optional[int] = parent
        _lowerCamelCase : Dict = batch_size
        _lowerCamelCase : int = image_size
        _lowerCamelCase : int = patch_size
        _lowerCamelCase : Dict = num_channels
        _lowerCamelCase : Optional[Any] = last_hidden_size
        _lowerCamelCase : Tuple = num_attention_heads
        _lowerCamelCase : Any = hidden_act
        _lowerCamelCase : Union[str, Any] = conv_kernel_size
        _lowerCamelCase : Any = output_stride
        _lowerCamelCase : Any = hidden_dropout_prob
        _lowerCamelCase : Optional[int] = attention_probs_dropout_prob
        _lowerCamelCase : Optional[Any] = classifier_dropout_prob
        _lowerCamelCase : Union[str, Any] = use_labels
        _lowerCamelCase : Dict = is_training
        _lowerCamelCase : int = num_labels
        _lowerCamelCase : str = initializer_range
        _lowerCamelCase : Optional[int] = scope

    def _lowercase ( self: int ):
        """Random pixel_values plus (optional) classification/segmentation labels."""
        _lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _lowerCamelCase : Dict = None
        _lowerCamelCase : Union[str, Any] = None
        if self.use_labels:
            _lowerCamelCase : Dict = ids_tensor([self.batch_size] ,self.num_labels )
            _lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
        _lowerCamelCase : Dict = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def _lowercase ( self: str ):
        """Assemble a MobileViTConfig from the tester's hyperparameters."""
        return MobileViTConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)

    def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: int ):
        """Forward the base model and check the feature-map output shape."""
        _lowerCamelCase : Optional[int] = MobileViTModel(config=__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Dict = model(__lowerCAmelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,)

    def _lowercase ( self: Union[str, Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: str ):
        """Forward the classification head and check the logits shape."""
        _lowerCamelCase : List[Any] = self.num_labels
        _lowerCamelCase : str = MobileViTForImageClassification(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : List[str] = model(__lowerCAmelCase ,labels=__lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    def _lowercase ( self: List[str] ,__lowerCAmelCase: int ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[Any] ):
        """Forward the segmentation head with and without labels; check shapes."""
        _lowerCamelCase : Dict = self.num_labels
        _lowerCamelCase : Tuple = MobileViTForSemanticSegmentation(__lowerCAmelCase )
        model.to(__lowerCAmelCase )
        model.eval()
        _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase )
        self.parent.assertEqual(
            result.logits.shape ,(
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,)
        _lowerCamelCase : Tuple = model(__lowerCAmelCase ,labels=__lowerCAmelCase )
        self.parent.assertEqual(
            result.logits.shape ,(
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,)

    def _lowercase ( self: str ):
        """Split prepared inputs into (config, inputs_dict) for common tests."""
        _lowerCamelCase : List[str] = self.prepare_config_and_inputs()
        # NOTE(review): tuple target with an annotation is a SyntaxError as written;
        # presumably ``config, pixel_values, labels, pixel_labels = config_and_inputs``.
        _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = config_and_inputs
        _lowerCamelCase : Optional[int] = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
    """Common ModelTesterMixin/PipelineTesterMixin test suite for MobileViT.

    NOTE(review): mechanically name-mangled -- the base names ``_a`` are
    undefined at module level, every class attribute rebinds
    ``lowerCAmelCase__``, every method is named ``_lowercase`` (later defs
    shadow earlier ones), and locals rebind ``_lowerCamelCase`` while bodies
    read the original names.  Confirm against the upstream file before running.
    """

    lowerCAmelCase__ = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    lowerCAmelCase__ = (
        {
            'feature-extraction': MobileViTModel,
            'image-classification': MobileViTForImageClassification,
            'image-segmentation': MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Flags for the common test mixin (attention/resize-embeddings/etc. support).
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False

    def _lowercase ( self: Any ):
        """Create the model tester and the config tester."""
        _lowerCamelCase : Optional[Any] = MobileViTModelTester(self )
        _lowerCamelCase : Optional[int] = MobileViTConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase )

    def _lowercase ( self: Optional[Any] ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds" )
    def _lowercase ( self: Union[str, Any] ):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings" )
    def _lowercase ( self: Any ):
        pass

    @unittest.skip(reason="MobileViT does not output attentions" )
    def _lowercase ( self: List[str] ):
        pass

    def _lowercase ( self: int ):
        """forward() must take ``pixel_values`` as its first argument."""
        # NOTE(review): tuple target with an annotation is a SyntaxError as written.
        _lowerCamelCase, _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : Any = model_class(__lowerCAmelCase )
            _lowerCamelCase : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCamelCase : List[str] = [*signature.parameters.keys()]
            _lowerCamelCase : Optional[Any] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def _lowercase ( self: List[Any] ):
        pass

    def _lowercase ( self: Optional[Any] ):
        """Base-model shape check via the model tester."""
        _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__lowerCAmelCase )

    def _lowercase ( self: Tuple ):
        """hidden_states has 5 feature maps, each halving the spatial size."""
        def check_hidden_states_output(__lowerCAmelCase: int ,__lowerCAmelCase: Tuple ,__lowerCAmelCase: int ):
            # NOTE(review): duplicate parameter names -- SyntaxError as written.
            _lowerCamelCase : str = model_class(__lowerCAmelCase )
            model.to(__lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                _lowerCamelCase : int = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
            _lowerCamelCase : Dict = outputs.hidden_states
            _lowerCamelCase : Optional[Any] = 5
            self.assertEqual(len(__lowerCAmelCase ) ,__lowerCAmelCase )
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            _lowerCamelCase : int = 2
            for i in range(len(__lowerCAmelCase ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,)
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride ,divisor // 2 )
        _lowerCamelCase, _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : Optional[Any] = True
            check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _lowerCamelCase : Any = True
            check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )

    def _lowercase ( self: Union[str, Any] ):
        """Classification-head check via the model tester."""
        _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )

    def _lowercase ( self: Optional[int] ):
        """Segmentation-head check via the model tester."""
        _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase )

    @slow
    def _lowercase ( self: List[Any] ):
        """Smoke-test loading the first published checkpoint."""
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Union[str, Any] = MobileViTModel.from_pretrained(__lowerCAmelCase )
            self.assertIsNotNone(__lowerCAmelCase )
def lowerCamelCase_( ):
    """Load the standard COCO two-cats fixture image used by the integration tests.

    Returns:
        PIL.Image.Image: the opened test image.

    Bug fix: the original assigned the opened image to a throwaway mangled name
    and then returned an undefined variable ``image`` (NameError); the wrong
    ``-> str`` annotation is dropped as well.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
    """Slow integration tests against published MobileViT checkpoints.

    NOTE(review): mechanically name-mangled -- locals rebind ``_lowerCamelCase``
    while the bodies read the intended names (``model``, ``outputs``, ...), and
    ``__lowerCAmelCase`` (used as a device / expected-value argument) is
    undefined.  Confirm against the upstream file before running.
    """

    @cached_property
    def _lowercase ( self: Any ):
        """Image processor for the xx-small checkpoint (None without vision deps)."""
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None

    @slow
    def _lowercase ( self: Optional[int] ):
        """Classification logits match the recorded reference values."""
        _lowerCamelCase : Tuple = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(__lowerCAmelCase )
        _lowerCamelCase : int = self.default_image_processor
        _lowerCamelCase : Union[str, Any] = prepare_img()
        _lowerCamelCase : List[Any] = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            _lowerCamelCase : Dict = model(**__lowerCAmelCase )
        # verify the logits
        _lowerCamelCase : str = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
        _lowerCamelCase : Any = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ).to(__lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__lowerCAmelCase ,atol=1e-4 ) )

    @slow
    def _lowercase ( self: Optional[Any] ):
        """Segmentation logits match the recorded reference values."""
        _lowerCamelCase : str = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        _lowerCamelCase : Any = model.to(__lowerCAmelCase )
        _lowerCamelCase : Tuple = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        _lowerCamelCase : Any = prepare_img()
        _lowerCamelCase : int = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            _lowerCamelCase : List[str] = model(**__lowerCAmelCase )
        _lowerCamelCase : Optional[Any] = outputs.logits
        # verify the logits
        _lowerCamelCase : List[str] = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape ,__lowerCAmelCase )
        _lowerCamelCase : Dict = torch.tensor(
            [
                [[6.97_13, 6.97_86, 7.24_22], [7.28_93, 7.28_25, 7.44_46], [7.65_80, 7.87_97, 7.94_20]],
                [[-10.68_69, -10.32_50, -10.34_71], [-10.42_28, -9.98_68, -9.71_32], [-11.04_05, -11.02_21, -10.73_18]],
                [[-3.30_89, -2.85_39, -2.67_40], [-3.27_06, -2.56_21, -2.51_08], [-3.25_34, -2.66_15, -2.66_51]],
            ] ,device=__lowerCAmelCase ,)
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,__lowerCAmelCase ,atol=1e-4 ) )

    @slow
    def _lowercase ( self: Optional[int] ):
        """post_process_semantic_segmentation honours target_sizes (and defaults)."""
        _lowerCamelCase : str = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        _lowerCamelCase : Optional[Any] = model.to(__lowerCAmelCase )
        _lowerCamelCase : Dict = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        _lowerCamelCase : int = prepare_img()
        _lowerCamelCase : Any = image_processor(images=__lowerCAmelCase ,return_tensors="pt" ).to(__lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            _lowerCamelCase : Optional[int] = model(**__lowerCAmelCase )
        _lowerCamelCase : Optional[Any] = outputs.logits.detach().cpu()
        _lowerCamelCase : Dict = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase ,target_sizes=[(50, 60)] )
        _lowerCamelCase : Optional[Any] = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape ,__lowerCAmelCase )
        _lowerCamelCase : Any = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase )
        _lowerCamelCase : List[str] = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape ,__lowerCAmelCase )
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module logger.
_lowercase = logging.get_logger(__name__)
# NOTE(review): this rebinds ``_lowercase`` and shadows the logger above --
# presumably these were two distinct names originally (a logger and the
# pretrained-config archive map); confirm before relying on either.
_lowercase = {
    '''Visual-Attention-Network/van-base''': (
        '''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
    ),
}
class __snake_case ( PretrainedConfig ):
    """Configuration for Visual-Attention-Network (VAN) models.

    Bug fix: the degraded version inherited from an undefined name and declared
    every ``__init__`` parameter with the same mangled name (a SyntaxError).
    Parameter names are restored from the attribute assignments in the body;
    ``PretrainedConfig`` is the base class imported at the top of this file.
    """

    UpperCamelCase_ = 'van'  # model_type identifier used by the auto classes

    def __init__(
        self : List[str],
        image_size : int = 2_24,
        num_channels : Optional[int] = 3,
        # NOTE: mutable list defaults are kept for interface compatibility with
        # the published config; callers should not mutate them in place.
        patch_sizes : Dict = [7, 3, 3, 3],
        strides : List[str] = [4, 2, 2, 2],
        hidden_sizes : Union[str, Any] = [64, 1_28, 3_20, 5_12],
        depths : Union[str, Any] = [3, 3, 12, 3],
        mlp_ratios : Any = [8, 8, 4, 4],
        hidden_act : Optional[int] = "gelu",
        initializer_range : List[str] = 0.02,
        layer_norm_eps : Optional[Any] = 1e-6,
        layer_scale_init_value : Dict = 1e-2,
        drop_path_rate : Union[str, Any] = 0.0,
        dropout_rate : Optional[Any] = 0.0,
        **kwargs : List[str],
    ) -> Tuple:
        """Store the architecture hyperparameters; extra kwargs go to the base."""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 659 | 0 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    """A node of the segment tree covering the inclusive index range [start, end].

    Bug fix: the degraded file defined both classes under one mangled name while
    the code itself still instantiates ``SegmentTreeNode`` -- the intended names
    are restored from those call sites.
    """

    def __init__( self , start , end , val , left=None , right=None ):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2  # split point between the two children
        self.left = left
        self.right = right

    def __repr__( self ):
        return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''


class SegmentTree:
    """Segment tree over ``collection`` whose aggregates are computed with the
    associative binary ``function`` (e.g. ``operator.add``, ``max``, ``min``)."""

    def __init__( self , collection: Sequence , function ):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 , len(collection ) - 1 )

    def update( self , i , val ):
        """Set ``collection[i] = val`` and refresh aggregates along the path."""
        self._update_tree(self.root , i , val )

    def query_range( self , i , j ):
        """Return the fn-aggregate over the inclusive index range [i, j]."""
        return self._query_range(self.root , i , j )

    def _build_tree( self , start , end ):
        # Leaf: a single element of the collection.
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start , mid )
        right = self._build_tree(mid + 1 , end )
        return SegmentTreeNode(start , end , self.fn(left.val , right.val ) , left , right )

    def _update_tree( self , node , i , val ):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val )
        else:
            self._update_tree(node.right , i , val )
        # Recompute this node's aggregate from the (possibly changed) children.
        node.val = self.fn(node.left.val , node.right.val )

    def _query_range( self , node , i , j ):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range entirely in the left child
                return self._query_range(node.left , i , j )
            # range straddles both children
            return self.fn(
                self._query_range(node.left , i , node.mid ) , self._query_range(node.right , node.mid + 1 , j ) , )
        # range entirely in the right child
        return self._query_range(node.right , i , j )

    def traverse( self ):
        """Yield all nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )


# Backward-compat: this module previously exposed the (last-defined) tree class
# under the mangled name.
_UpperCamelCase = SegmentTree
if __name__ == "__main__":
    import operator

    # Demo: build the same tree with three different aggregate functions.
    # Bug fix: the degraded version bound the tree to a mangled constant name
    # but then iterated an undefined ``arr``.
    for fn in [operator.add, max, min]:
        print('''*''' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
| 47 |
from math import factorial
def UpperCamelCase ( n , k ):
    """Return the binomial coefficient C(n, k): the number of ways to choose
    ``k`` items from ``n`` without regard to order.

    Raises:
        ValueError: if ``k`` is negative or larger than ``n``.

    Bug fix: the degraded version declared both parameters with the same name
    (a SyntaxError); the names are restored from the body's use of ``n``/``k``.
    """
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
    # Demo of the combinations helper defined above.
    # Bug fix: the degraded version called an undefined ``combinations`` name.
    print(
        '''The number of five-card hands possible from a standard''',
        f"fifty-two card deck is: {UpperCamelCase(52, 5)}\n",
    )
    print(
        '''If a class of 40 students must be arranged into groups of''',
        f"4 for group projects, there are {UpperCamelCase(40, 4)} ways",
        '''to arrange them.\n''',
    )
    print(
        '''If 10 teams are competing in a Formula One race, there''',
        f"are {UpperCamelCase(10, 3)} ways that first, second and",
        '''third place can be awarded.''',
    )
| 659 | 0 |
"""Helper module imported by test_patching.py to exercise ``patch_submodule()``.

Each import style below (plain, aliased, from-import, nested) is a distinct
patching target; none of the names are used directly.
"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
UpperCAmelCase__ : Optional[Any] = open # noqa: we just need to have a builtin inside this module to test it properly
import argparse
import json
from tqdm import tqdm
def UpperCamelCase ( ):
    """Parse raw DPR training data into an evaluation-set file (one question per
    line) and a gold-data file (tab-joined positive context titles per line).

    Bug fix: the degraded version read undefined locals (``args``, the loaded
    records) and passed a mangled name as ``type=``; restored to ``str`` and to
    locals that match the references in the loop body.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path" , type=str , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
    parser.add_argument(
        "--evaluation_set" , type=str , help="where to store parsed evaluation_set file" , )
    parser.add_argument(
        "--gold_data_path" , type=str , help="where to store parsed gold_data_path file" , )
    args = parser.parse_args()

    with open(args.src_path , "r") as src_file, open(args.evaluation_set , "w") as eval_file, open(
        args.gold_data_path , "w") as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            # Gold answer = titles of all positive contexts for this question.
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
    # Bug fix: the degraded version called an undefined ``main``; the entry
    # point defined above is ``UpperCamelCase``.
    UpperCamelCase()
| 659 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger.
_lowercase : Dict = logging.get_logger(__name__)
# NOTE(review): this rebinds ``_lowercase`` and shadows the logger above --
# presumably these were two distinct names originally (a logger and the
# pretrained-config archive map); confirm before relying on either.
_lowercase : List[Any] = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration for data2vec-text models (RoBERTa-style encoder).

    Bug fix: the degraded version inherited from an undefined name, declared
    every ``__init__`` parameter as the same mangled name (a SyntaxError), and
    annotated the class attribute with an unimported ``List``.  Parameter names
    are restored from the attribute assignments in the body; ``PretrainedConfig``
    is imported at the top of this file.
    """

    # model_type identifier used by the auto classes.
    a__ = "data2vec-text"

    def __init__(
        self : Dict,
        vocab_size : Union[str, Any] = 3_05_22,
        hidden_size : Tuple = 7_68,
        num_hidden_layers : Union[str, Any] = 12,
        num_attention_heads : Dict = 12,
        intermediate_size : int = 30_72,
        hidden_act : Any = "gelu",
        hidden_dropout_prob : List[str] = 0.1,
        attention_probs_dropout_prob : Optional[int] = 0.1,
        max_position_embeddings : Dict = 5_12,
        type_vocab_size : Optional[Any] = 2,
        initializer_range : Optional[int] = 0.02,
        layer_norm_eps : str = 1E-12,
        pad_token_id : List[str] = 1,
        bos_token_id : List[str] = 0,
        eos_token_id : List[Any] = 2,
        position_embedding_type : List[str] = "absolute",
        use_cache : str = True,
        classifier_dropout : Optional[int] = None,
        **kwargs : int,
    ):
        """Store the architecture hyperparameters; token ids go to the base class."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _UpperCAmelCase ( OnnxConfig ):
    """ONNX export configuration for data2vec-text.

    Bug fix: the degraded version inherited from an undefined name (the intended
    base, ``OnnxConfig``, is imported at the top of this file) and read an
    undefined ``dynamic_axis`` local.  NOTE(review): this class shadows the
    model config class above (both share one mangled name); confirm the
    intended distinct class names against the upstream file.
    """

    @property
    def a ( self : Dict ):
        """Dynamic-axis mapping for the model's ONNX inputs.

        Multiple-choice tasks carry an extra ``choice`` dimension between batch
        and sequence.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 49 |
from collections.abc import Sequence
def max_subsequence_sum(nums = None):
    """Return the maximum sum of a contiguous subsequence of ``nums`` (Kadane).

    Raises:
        ValueError: if ``nums`` is ``None`` or empty.
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    # `best` is the answer over everything seen so far; `current` is the best
    # sum of a subsequence ending at the element just consumed.  The original
    # single-accumulator update mixed the two and could report sums of
    # non-contiguous elements (e.g. [2, -5, 3] -> 5 instead of 3).  Renamed to
    # `max_subsequence_sum`, the name the __main__ block below already calls.
    best = nums[0]
    current = nums[0]
    for num in nums[1:]:
        current = max(num, current + num)
        best = max(best, current)
    return best
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user.
    # NOTE(review): the original bound both reads to one throwaway name and
    # then referenced the undefined names `n` and `array`.
    n = int(input('''Enter number of elements : ''').strip())
    array = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
    print(max_subsequence_sum(array))
| 659 | 0 |
'''simple docstring'''
def A__ ( weights: list , values: list , number_of_items: int , max_weight: int , index: int ) -> int:
    """Return the best total value achievable with items[index:] under `max_weight`.

    Classic recursive 0/1 knapsack (exponential time, no memoisation).

    NOTE(review): the original definition repeated one parameter name five
    times (a SyntaxError) while the body read unbound names; parameters are
    restored to the names the body uses.
    """
    # Base case: no items left to consider.
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans1 = A__(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item (only if it fits the remaining capacity).
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + A__(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 50 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
# NOTE(review): the original bound this mapping to a throwaway name and then
# passed the undefined name `_import_structure` to `_LazyModule`.
_import_structure = {'''tokenization_tapex''': ['''TapexTokenizer''']}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 659 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase__ :
    """Builds RegNet configs and dummy inputs for the model tests below.

    NOTE(review): names in this class look machine-mangled -- `__init__`
    repeats the parameter name `a__` (a SyntaxError), every assignment
    rebinds the single local `UpperCAmelCase` while the code reads names that
    are never bound (`parent`, `batch_size`, `config_and_inputs`, ...), and
    all five methods share the name `__snake_case` so each definition shadows
    the previous one.  Code left untouched; restore distinct names before use.
    """

    def __init__( self : List[str] , a__ : Dict , a__ : List[Any]=3 , a__ : str=32 , a__ : List[Any]=3 , a__ : str=10 , a__ : List[Any]=[10, 20, 30, 40] , a__ : Dict=[1, 1, 2, 1] , a__ : Tuple=True , a__ : List[Any]=True , a__ : List[str]="relu" , a__ : List[Any]=3 , a__ : Union[str, Any]=None , ):
        # Stash the test harness and the hyper-parameters for config creation.
        UpperCAmelCase = parent
        UpperCAmelCase = batch_size
        UpperCAmelCase = image_size
        UpperCAmelCase = num_channels
        UpperCAmelCase = embeddings_size
        UpperCAmelCase = hidden_sizes
        UpperCAmelCase = depths
        UpperCAmelCase = is_training
        UpperCAmelCase = use_labels
        UpperCAmelCase = hidden_act
        UpperCAmelCase = num_labels
        UpperCAmelCase = scope
        UpperCAmelCase = len(a__ )

    def __snake_case ( self : List[str] ):
        # Random pixel tensor plus (optionally) random labels, and a config.
        UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase = None
        if self.use_labels:
            UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
        UpperCAmelCase = self.get_config()
        return config, pixel_values, labels

    def __snake_case ( self : Any ):
        # Build a RegNetConfig from the stored hyper-parameters.
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )

    def __snake_case ( self : Optional[int] , a__ : List[Any] , a__ : List[Any] , a__ : Tuple ):
        # Forward the base model and check the shape of the final feature map.
        UpperCAmelCase = RegNetModel(config=a__ )
        model.to(a__ )
        model.eval()
        UpperCAmelCase = model(a__ )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def __snake_case ( self : int , a__ : List[Any] , a__ : Union[str, Any] , a__ : Union[str, Any] ):
        # Forward the classification head and check the logits shape.
        UpperCAmelCase = self.num_labels
        UpperCAmelCase = RegNetForImageClassification(a__ )
        model.to(a__ )
        model.eval()
        UpperCAmelCase = model(a__ , labels=a__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __snake_case ( self : Union[str, Any] ):
        # Repackage (config, pixel_values, labels) into the common dict form.
        UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase, UpperCAmelCase, UpperCAmelCase = config_and_inputs
        UpperCAmelCase = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Common ModelTesterMixin/PipelineTesterMixin test suite for RegNet.

    NOTE(review): names look machine-mangled here too -- the class attributes
    all rebind `_lowerCamelCase` (only the final `False` survives), methods
    share the name `__snake_case`, locals rebind `UpperCAmelCase`, and several
    bodies read names that are never bound (`a__`, `model_class`, `config`,
    `inputs_dict`, `layers_type`, ...).  `nn.BatchNormad` also looks like a
    mangled `nn.BatchNorm2d`.  Code left untouched; flagged for repair.
    """

    _lowerCamelCase =(RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    _lowerCamelCase =(
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    _lowerCamelCase =False
    _lowerCamelCase =False
    _lowerCamelCase =False
    _lowerCamelCase =False

    def __snake_case ( self : List[str] ):
        # Set up the model tester and the config tester.
        UpperCAmelCase = RegNetModelTester(self )
        UpperCAmelCase = ConfigTester(self , config_class=a__ , has_text_modality=a__ )

    def __snake_case ( self : Tuple ):
        # Run the full battery of config serialization checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def __snake_case ( self : Optional[int] ):
        return

    @unittest.skip(reason='''RegNet does not use inputs_embeds''' )
    def __snake_case ( self : Optional[int] ):
        pass

    @unittest.skip(reason='''RegNet does not support input and output embeddings''' )
    def __snake_case ( self : Tuple ):
        pass

    def __snake_case ( self : int ):
        # The forward signature of every model class must start with `pixel_values`.
        UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase = model_class(a__ )
            UpperCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase = [*signature.parameters.keys()]
            UpperCAmelCase = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , a__ )

    def __snake_case ( self : Optional[int] ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a__ )

    def __snake_case ( self : Optional[Any] ):
        # Norm-layer weights must initialise to 1 and biases to 0.
        UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase = model_class(config=a__ )
            for name, module in model.named_modules():
                if isinstance(a__ , (nn.BatchNormad, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )

    def __snake_case ( self : Dict ):
        # Hidden-state outputs must have one entry per stage (+ embeddings)
        # and halve the spatial resolution at the first stage.
        def check_hidden_states_output(a__ : List[Any] , a__ : List[Any] , a__ : str ):
            UpperCAmelCase = model_class(a__ )
            model.to(a__ )
            model.eval()
            with torch.no_grad():
                UpperCAmelCase = model(**self._prepare_for_class(a__ , a__ ) )
            UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            UpperCAmelCase = self.model_tester.num_stages
            self.assertEqual(len(a__ ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )

        UpperCAmelCase, UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                UpperCAmelCase = layer_type
                UpperCAmelCase = True
                check_hidden_states_output(a__ , a__ , a__ )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                UpperCAmelCase = True
                check_hidden_states_output(a__ , a__ , a__ )

    def __snake_case ( self : str ):
        UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a__ )

    @slow
    def __snake_case ( self : str ):
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase = RegNetModel.from_pretrained(a__ )
            self.assertIsNotNone(a__ )
def __snake_case ( ) -> Dict:
    """Load the fixture COCO image used by the slow integration test below.

    NOTE(review): the original bound the opened image to a throwaway name and
    then returned the undefined name `image`.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow integration test: run a pretrained RegNet classifier on a COCO image.

    NOTE(review): locals rebind `UpperCAmelCase` while later lines read names
    that are never bound (`image_processor`, `model`, `inputs`, `outputs`),
    and `a__` is read where a device / expected value was presumably intended.
    Code left untouched; flagged for repair.
    """

    @cached_property
    def __snake_case ( self : int ):
        # Image processor for the first published RegNet checkpoint
        # (None when the vision extras are unavailable).
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def __snake_case ( self : List[str] ):
        UpperCAmelCase = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(a__ )
        UpperCAmelCase = self.default_image_processor
        UpperCAmelCase = prepare_img()
        UpperCAmelCase = image_processor(images=a__ , return_tensors='''pt''' ).to(a__ )
        # forward pass
        with torch.no_grad():
            UpperCAmelCase = model(**a__ )
        # verify the logits
        UpperCAmelCase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , a__ )
        UpperCAmelCase = torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(a__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) )
| 51 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# Paths used by the copy checker.
# NOTE(review): the original bound both paths (and the module spec) to one
# rebound throwaway name and then referenced the undefined names
# `DIFFUSERS_PATH` and `spec` below.
DIFFUSERS_PATH = '''src/diffusers'''
REPO_PATH = '''.'''

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    '''diffusers''',
    os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def UpperCamelCase ( snake_case__ , snake_case__):
return line.startswith(snake_case__) or len(snake_case__) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , snake_case__) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` under `DIFFUSERS_PATH`.

    `object_name` is a dotted path such as ``models.unet_2d.UNet2DModel``:
    leading components name the module file, trailing ones the class/function
    to extract.

    NOTE(review): the original assigned every intermediate value to one
    rebound throwaway local and then read the real names (`parts`, `module`,
    `lines`, ...); its call site uses the name `find_code_in_diffusers`.
    """
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f'''{module}.py''')):
        i += 1
        if i < len(parts):
            module = os.path.join(module , parts[i])
    if i >= len(parts):
        raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''')

    with open(os.path.join(DIFFUSERS_PATH , f'''{module}.py''') , "r" , encoding="utf-8" , newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index]) is None
        ):
            line_index += 1
        # One extra indentation level per nesting step (assumes 4-space
        # indents in the repo -- the mangled source showed a single space).
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f''' {object_name} does not match any function or class in {module}.''')

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index] , indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_lowercase = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_lowercase = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_lowercase = re.compile(r'''<FILL\s+[^>]*>''')
def get_indent(code):
    """Return the leading whitespace of the first non-empty line of `code`.

    NOTE(review): the original read the unbound names `code`, `lines` and
    `idx`; the call sites in this file already use the name `get_indent`.
    """
    lines = code.split("\n")
    idx = 0
    # Skip completely empty leading lines.
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(R"^(\s*)\S" , lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Format `code` with black (line length 119), preserving its indentation.

    Indented snippets are wrapped in a dummy class so black accepts them,
    then unwrapped on the way out.

    NOTE(review): the original read unbound names (`code`, `has_indent`,
    `result`) and a mangled target version `PYaa`; `PY37` and `preview=True`
    are assumed here -- confirm against the repo's check_copies script.
    """
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f'''class Bla:\n{code}'''
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True)
    result = black.format_str(code , mode=mode)
    result , _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check that every `# Copied from` block in `filename` matches its source.

    Returns a list of ``[object_name, start_index]`` diffs; when `overwrite`
    is True the stale blocks are rewritten in place instead.

    NOTE(review): the original def repeated one parameter name (a
    SyntaxError) and its body read a dozen unbound names; the rewrite message
    had also lost its `{filename}` interpolation.  The call site below uses
    the name `is_copy_consistent`.
    """
    with open(filename , "r" , encoding="utf-8" , newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        # The observed code starts right after the comment (one extra line
        # when the copy lives one indentation level deeper).
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent) and re.search(f'''^{indent}# End copy''' , line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with" , "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja , objb , option = pattern.groups()
                theoretical_code = re.sub(obja , objb , theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obja.lower() , objb.lower() , theoretical_code)
                    theoretical_code = re.sub(obja.upper() , objb.upper() , theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f'''Detected changes, rewriting {filename}.''')
        with open(filename , "w" , encoding="utf-8" , newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite = False):
    """Check every python file under DIFFUSERS_PATH for stale `# Copied from` blocks.

    Raises an Exception listing the inconsistencies unless `overwrite` is
    True, in which case the files are fixed in place.

    NOTE(review): the original passed its boolean flag where a directory and
    a filename were expected, and the diff message had lost its `{filename}`
    interpolation.  The `__main__` block below already calls `check_copies`.
    """
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH , "**/*.py") , recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite)
        diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and parsed args to one
    # rebound throwaway name and then read the undefined names
    # `parser` / `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 659 | 0 |
"""simple docstring"""
import os
def solution():
    """Return the greatest product of four adjacent numbers in any direction
    (right, down, or either diagonal) in the 20x20 grid stored in `grid.txt`
    next to this file (Project Euler problem 11).

    NOTE(review): the original read the undefined name `a_` for the module
    path, rebound every value to one throwaway local while reading the
    unbound names `l`/`maximum`/`temp`, and was defined under a name that did
    not match the `solution()` call in the `__main__` block.
    """
    with open(os.path.dirname(__file__) + '''/grid.txt''') as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left; needs at least 3 columns of room on the left)
    for i in range(17):
        for j in range(3 , 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum


if __name__ == "__main__":
    print(solution())
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class __snake_case ( snake_case__ ):
    """Configuration holding the Swin Transformer V2 hyper-parameters.

    NOTE(review): the original `__init__` repeated one parameter name (a
    SyntaxError), bound both class attributes to the same name (the second
    clobbering the first), and assigned every value to a throwaway local.
    Names are restored to what the body reads and to the `model_type` /
    `attribute_map` attributes the pretrained-config machinery conventionally
    expects -- confirm against the base class.
    """

    model_type = 'swinv2'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 659 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class _UpperCAmelCase ( _UpperCamelCase ):
    """Slow end-to-end check: fine-tune a tiny BERT2BERT encoder-decoder for
    summarization on 1% of CNN/DailyMail with Seq2SeqTrainer.

    NOTE(review): locals here rebind the single name `__lowerCAmelCase` while
    later lines read names that are never bound (`bertabert`, `tokenizer`,
    `train_dataset`, `val_dataset`, `batch`, `inputs`, `outputs`, `pred`,
    `trainer`, ...).  The names look machine-mangled; code left untouched and
    flagged for repair.
    """

    @slow
    @require_torch
    def lowercase ( self : Tuple ) -> Any:
        # Tiny model + tokenizer, plus decoder control tokens for generation.
        __lowerCAmelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' )
        __lowerCAmelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
        __lowerCAmelCase = bertabert.config.encoder.vocab_size
        __lowerCAmelCase = tokenizer.sep_token_id
        __lowerCAmelCase = tokenizer.cls_token_id
        __lowerCAmelCase = 1_2_8
        # Tiny train/val splits keep the test fast.
        __lowerCAmelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' )
        __lowerCAmelCase = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' )
        __lowerCAmelCase = train_dataset.select(range(3_2 ) )
        __lowerCAmelCase = val_dataset.select(range(1_6 ) )
        __lowerCAmelCase = 4

        def _map_to_encoder_decoder_inputs(lowerCAmelCase_ : Optional[int] ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            __lowerCAmelCase = tokenizer(batch['article'] , padding='max_length' , truncation=lowerCAmelCase_ , max_length=5_1_2 )
            __lowerCAmelCase = tokenizer(batch['highlights'] , padding='max_length' , truncation=lowerCAmelCase_ , max_length=1_2_8 )
            __lowerCAmelCase = inputs.input_ids
            __lowerCAmelCase = inputs.attention_mask
            __lowerCAmelCase = outputs.input_ids
            __lowerCAmelCase = outputs.input_ids.copy()
            # Pad positions in the labels are masked to -100 so the loss skips them.
            __lowerCAmelCase = [
                [-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            __lowerCAmelCase = outputs.attention_mask
            assert all(len(lowerCAmelCase_ ) == 5_1_2 for x in inputs.input_ids )
            assert all(len(lowerCAmelCase_ ) == 1_2_8 for x in outputs.input_ids )
            return batch

        def _compute_metrics(lowerCAmelCase_ : List[str] ):
            # Exact-match accuracy between decoded predictions and labels.
            __lowerCAmelCase = pred.label_ids
            __lowerCAmelCase = pred.predictions
            # all unnecessary tokens are removed
            __lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
            __lowerCAmelCase = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
            __lowerCAmelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowerCAmelCase_ ) )] ) / len(lowerCAmelCase_ )
            return {"accuracy": accuracy}

        # map train dataset
        __lowerCAmelCase = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , remove_columns=['article', 'highlights'] , )
        train_dataset.set_format(
            type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
        # same for validation dataset
        __lowerCAmelCase = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=lowerCAmelCase_ , batch_size=lowerCAmelCase_ , remove_columns=['article', 'highlights'] , )
        val_dataset.set_format(
            type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , )
        __lowerCAmelCase = self.get_auto_remove_tmp_dir()
        __lowerCAmelCase = SeqaSeqTrainingArguments(
            output_dir=lowerCAmelCase_ , per_device_train_batch_size=lowerCAmelCase_ , per_device_eval_batch_size=lowerCAmelCase_ , predict_with_generate=lowerCAmelCase_ , evaluation_strategy='steps' , do_train=lowerCAmelCase_ , do_eval=lowerCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        __lowerCAmelCase = SeqaSeqTrainer(
            model=lowerCAmelCase_ , args=lowerCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=lowerCAmelCase_ , eval_dataset=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , )
        # start training
        trainer.train()
| 53 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
# Module logger.  NOTE(review): the class below calls `logger.warning`, but
# the original bound this to a throwaway name, leaving `logger` undefined.
logger = logging.get_logger(__name__)
class __snake_case ( snake_case__ ):
    """Speech feature extractor producing per-frame MFSC (log-mel) features
    with optional per-utterance mean/variance normalisation.

    NOTE(review): names in this class look machine-mangled in places -- the
    `__init__` parameters all share the name `lowerCAmelCase__` (a
    SyntaxError) while the body reads `feature_size`, `sampling_rate`, ...,
    and locals rebind `lowerCAmelCase_` while later lines read the real names
    (`window`, `fbanks`, `mean`, `std`, `raw_speech`, `features`,
    `padded_inputs`, ...).  Code left untouched; flagged for repair.
    """

    # Keys emitted in the returned BatchFeature.
    UpperCamelCase_ = ['input_features', 'attention_mask']

    def __init__( self : Optional[Any] ,lowerCAmelCase__ : Any=80 ,lowerCAmelCase__ : Optional[Any]=1_60_00 ,lowerCAmelCase__ : List[str]=0.0 ,lowerCAmelCase__ : Tuple=10 ,lowerCAmelCase__ : Optional[Any]=25 ,lowerCAmelCase__ : Any="hamming_window" ,lowerCAmelCase__ : List[str]=32_768.0 ,lowerCAmelCase__ : Union[str, Any]=0.97 ,lowerCAmelCase__ : Any=1.0 ,lowerCAmelCase__ : str=True ,lowerCAmelCase__ : int=True ,lowerCAmelCase__ : Tuple=False ,**lowerCAmelCase__ : Optional[int] ,) -> Optional[Any]:
        '''simple docstring'''
        super().__init__(feature_size=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,padding_value=lowerCAmelCase__ ,**lowerCAmelCase__ )
        # Stash the raw hyper-parameters.
        lowerCAmelCase_ : Optional[int] = feature_size
        lowerCAmelCase_ : List[Any] = sampling_rate
        lowerCAmelCase_ : Union[str, Any] = padding_value
        lowerCAmelCase_ : str = hop_length
        lowerCAmelCase_ : str = win_length
        lowerCAmelCase_ : str = frame_signal_scale
        lowerCAmelCase_ : Any = preemphasis_coeff
        lowerCAmelCase_ : Optional[Any] = mel_floor
        lowerCAmelCase_ : List[str] = normalize_means
        lowerCAmelCase_ : Optional[Any] = normalize_vars
        lowerCAmelCase_ : Dict = win_function
        lowerCAmelCase_ : List[Any] = return_attention_mask
        # Window/hop lengths converted from milliseconds to samples, and the
        # FFT size derived from the window length.
        lowerCAmelCase_ : Tuple = win_length * sampling_rate // 10_00
        lowerCAmelCase_ : str = hop_length * sampling_rate // 10_00
        lowerCAmelCase_ : Dict = optimal_fft_length(self.sample_size )
        lowerCAmelCase_ : Optional[int] = (self.n_fft // 2) + 1

    def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : np.array ) -> np.ndarray:
        '''simple docstring'''
        # Build the analysis window, the mel filter bank, and run the
        # spectrogram for a single waveform; returns frames-by-features.
        if self.win_function == "hamming_window":
            lowerCAmelCase_ : int = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=lowerCAmelCase__ )
        else:
            lowerCAmelCase_ : Tuple = window_function(window_length=self.sample_size ,name=self.win_function )
        lowerCAmelCase_ : List[str] = mel_filter_bank(
            num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,)
        lowerCAmelCase_ : Any = spectrogram(
            one_waveform * self.frame_signal_scale ,window=lowerCAmelCase__ ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=lowerCAmelCase__ ,preemphasis=self.preemphasis_coeff ,mel_filters=lowerCAmelCase__ ,mel_floor=self.mel_floor ,log_mel="log" ,)
        return msfc_features.T

    def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[Any] ,lowerCAmelCase__ : Tuple ) -> Optional[Any]:
        '''simple docstring'''
        # Per-utterance mean/variance normalisation over the valid frames,
        # padding the tail with `padding_value`.
        if self.normalize_means:
            lowerCAmelCase_ : Optional[int] = x[:input_length].mean(axis=0 )
            lowerCAmelCase_ : List[str] = np.subtract(lowerCAmelCase__ ,lowerCAmelCase__ )
        if self.normalize_vars:
            lowerCAmelCase_ : Optional[Any] = x[:input_length].std(axis=0 )
            lowerCAmelCase_ : Tuple = np.divide(lowerCAmelCase__ ,lowerCAmelCase__ )
        if input_length < x.shape[0]:
            lowerCAmelCase_ : int = padding_value
        # make sure array is in float32
        lowerCAmelCase_ : Any = x.astype(np.floataa )
        return x

    def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[np.ndarray] ,lowerCAmelCase__ : Optional[np.ndarray] = None ) -> List[np.ndarray]:
        '''simple docstring'''
        # Valid lengths come from the attention mask when available.
        lowerCAmelCase_ : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(lowerCAmelCase__ ,lowerCAmelCase__ ,self.padding_value ) for x, n in zip(lowerCAmelCase__ ,lowerCAmelCase__ )]

    def __call__( self : int ,lowerCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCAmelCase__ : Union[bool, str, PaddingStrategy] = False ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : bool = False ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[bool] = None ,lowerCAmelCase__ : Optional[Union[str, TensorType]] = None ,lowerCAmelCase__ : Optional[int] = None ,**lowerCAmelCase__ : Union[str, Any] ,) -> BatchFeature:
        '''simple docstring'''
        # Validate the declared sampling rate against the extractor's.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        # Normalise the input into a list of float32 mono waveforms.
        lowerCAmelCase_ : List[Any] = isinstance(lowerCAmelCase__ ,np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        lowerCAmelCase_ : str = is_batched_numpy or (
            isinstance(lowerCAmelCase__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
        )
        if is_batched:
            lowerCAmelCase_ : Tuple = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(lowerCAmelCase__ ,np.ndarray ):
            lowerCAmelCase_ : int = np.asarray(lowerCAmelCase__ ,dtype=np.floataa )
        elif isinstance(lowerCAmelCase__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            lowerCAmelCase_ : Union[str, Any] = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            lowerCAmelCase_ : Optional[int] = [raw_speech]
        # extract fbank features
        lowerCAmelCase_ : Dict = [self._extract_mfsc_features(lowerCAmelCase__ ) for one_waveform in raw_speech]
        # convert into correct format for padding
        lowerCAmelCase_ : int = BatchFeature({"input_features": features} )
        lowerCAmelCase_ : Union[str, Any] = self.pad(
            lowerCAmelCase__ ,padding=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,truncation=lowerCAmelCase__ ,pad_to_multiple_of=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
        # make sure list is in array format
        lowerCAmelCase_ : Optional[Any] = padded_inputs.get("input_features" )
        if isinstance(input_features[0] ,lowerCAmelCase__ ):
            lowerCAmelCase_ : Optional[int] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for feature in input_features]
        lowerCAmelCase_ : List[Any] = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            lowerCAmelCase_ : Dict = [np.asarray(lowerCAmelCase__ ,dtype=np.intaa ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            # Only pass the mask through when real padding was requested.
            lowerCAmelCase_ : Dict = (
                np.array(lowerCAmelCase__ ,dtype=np.intaa )
                if self._get_padding_strategies(lowerCAmelCase__ ,max_length=lowerCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            lowerCAmelCase_ : List[str] = self.normalize(
                padded_inputs["input_features"] ,attention_mask=lowerCAmelCase__ )
        if return_tensors is not None:
            lowerCAmelCase_ : Dict = padded_inputs.convert_to_tensors(lowerCAmelCase__ )
        return padded_inputs
| 659 | 0 |
import inspect
import re
# NOTE(review): hashlib exposes no `shaaaa`; the digest used here is sha256.
# Aliased so existing references to `shaaaa` keep working.
from hashlib import sha256 as shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines):
    """Hash a list of Python source lines, ignoring comments and blank lines.

    ``# ...`` comments are stripped and empty lines dropped so that purely
    cosmetic edits do not change the hash; the surviving lines are joined
    with newlines and hashed with SHA-256.  Returns the hex digest string.
    """
    from hashlib import sha256  # local import: the module-level hashlib import is unusable

    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# Backward-compatible alias for the previous (obfuscated) name.
a__ = _hash_python_lines
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
# Folder-based loaders claim every media extension they understand, in both cases.
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

# Modules whose loaders also understand metadata files alongside the data files.
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 54 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_lowercase = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search for ``target`` in ``array[left:right]``.

    Returns the index of the first occurrence, or -1 if ``target`` is absent.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int, precision: int = 10) -> int:
    """Iterative ternary search for ``target`` in the sorted list ``array``.

    The search range is repeatedly cut into thirds; once it is narrower than
    ``precision`` elements, a plain linear scan finishes the job.  Returns the
    index of ``target`` or -1 if it is not present.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            # target lies strictly between the two probe points
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int, precision: int = 10) -> int:
    """Recursive ternary search for ``target`` in sorted ``array[left:right]``.

    Falls back to a linear scan once the range is narrower than ``precision``
    elements.  Returns the index of ``target`` or -1 if it is not present.
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target, precision)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target, precision)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target, precision)
    else:
        return -1
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Read a sorted, comma-separated list and a target from the user, then
    # run both search variants and report where the target was found.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at positions: {result_ite}")
        print(f"Recursive search: {target} found at positions: {result_rec}")
    else:
        print("Not found")
| 659 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class UpperCAmelCase ( TaskTemplate ):
    """Task template describing an image-classification dataset.

    Maps the dataset's image and label columns onto the canonical
    ``{"image": Image()} -> {"labels": ClassLabel}`` schema.
    """

    # Keep `task` in asdict() output even when it still has its default value.
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of the template whose label schema uses the dataset's actual ClassLabel feature.

        Raises ValueError if the label column is missing or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write through __dict__ on the copy.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self):
        """Mapping from dataset column names to the task's standard column names."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 55 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
# Module-level logger for this tokenizer module (name kept from the original file).
_lowercase = logging.get_logger(__name__)
# File names expected inside a pretrained tokenizer directory.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

# Download locations of the pretrained vocabulary/merges/config files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

# Maximum input length (in tokens) for each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class __snake_case ( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) BlenderbotSmall tokenizer built on byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        """Build the fast tokenizer from vocab/merges files and special-token settings."""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """Wrap one sequence (or a pair) of token ids with BOS/EOS markers."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_pair + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None) -> List[int]:
        """Return the all-zero token-type-id mask (BlenderbotSmall has no segment ids)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]
| 659 | 0 |
'''simple docstring'''
def longest_common_subsequence(x: str, y: str):
    """Return ``(length, subsequence)`` of the longest common subsequence of ``x`` and ``y``.

    Classic O(m*n) dynamic programming: ``l[i][j]`` holds the LCS length of
    ``x[:i]`` and ``y[:j]``; one optimal subsequence is rebuilt by walking the
    table backwards.
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # Walk the table backwards to reconstruct one optimal subsequence.
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


# Backward-compatible alias for the previous (obfuscated) name.
_a = longest_common_subsequence
if __name__ == "__main__":
    # Demo: compute the LCS of two sample strings and compare to the known answer.
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
| 56 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """Reverse the byte order of a 32-character bit string (four 8-char groups).

    Raises ValueError if the input is not exactly 32 characters long.
    """
    if len(string_aa) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Render the low 32 bits of ``i`` as 8 hex characters in little-endian byte order.

    Raises ValueError for negative input.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Expand ``message`` into an MD5-padded bit string (b"0"/b"1" characters).

    The message bits are followed by a single 1-bit, zero padding up to
    448 (mod 512), and finally the original bit length as a 64-bit value in
    MD5's little-endian word order; the result's length is a multiple of 512.
    """
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes):
    """Yield each 512-bit chunk of ``bit_string`` as a list of 16 little-endian 32-bit ints.

    Raises ValueError if the input length is not a multiple of 512.
    """
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    """Bitwise NOT of ``i`` interpreted as an unsigned 32-bit integer.

    Raises ValueError for negative input.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    """Add two integers modulo 2**32 (unsigned 32-bit wraparound addition)."""
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    """Rotate the 32-bit value ``i`` left by ``shift`` bits.

    Raises ValueError if either argument is negative.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    # The shifted-out high bits and shifted-in low bits never overlap,
    # so XOR behaves like OR here; the final modulo masks to 32 bits.
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Compute the MD5 digest of ``message``; returns 32 hex characters as bytes.

    Implements RFC 1321 over a bit-string representation: the padded message
    is split into 512-bit chunks of 16 little-endian 32-bit words, and 64
    mixing rounds are applied per chunk.
    """
    bit_string = preprocess(message)

    # Per-round additive constants: floor(2**32 * |sin(i + 1)|)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    aa = 0x67_45_23_01
    ba = 0xef_cd_ab_89
    ca = 0x98_ba_dc_fe
    da = 0x10_32_54_76

    # Per-round left-rotation amounts (four groups of 16 rounds).
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)

    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 659 | 0 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    # PIL is unavailable: provide a minimal stand-in so the module still imports.
    class Image:
        """Placeholder for PIL.Image used when vision dependencies are missing."""

        @staticmethod
        def open(*args, **kwargs):
            # Never reached in vision-less runs; tests requiring vision are skipped.
            pass
@is_pipeline_test
@require_torch
@require_vision
class _lowerCAmelCase( unittest.TestCase ):
    """Pipeline tests for the visual-question-answering (VQA) task."""

    # Model classes the common pipeline test harness may instantiate.
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a tiny VQA pipeline plus example inputs for the common test runner."""
        vqa_pipeline = pipeline('visual-question-answering', model='hf-internal-testing/tiny-vilt-random-vqa')
        examples = [
            {
                'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
                'question': 'How many cats are there?',
            },
            {
                'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
                'question': 'How many cats are there?',
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        """Each example must yield a list of {score, answer} dicts."""
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs, [
                [{'score': ANY(float), 'answer': ANY(str)}],
                [{'score': ANY(float), 'answer': ANY(str)}],
            ], )

    @require_torch
    def test_small_model_pt(self):
        """Smoke test with a tiny random checkpoint: shapes/types only, no exact values."""
        vqa_pipeline = pipeline('visual-question-answering', model='hf-internal-testing/tiny-vilt-random-vqa')
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        question = 'How many cats are there?'

        outputs = vqa_pipeline(image=image, question='How many cats are there?', top_k=2)
        self.assertEqual(
            outputs, [{'score': ANY(float), 'answer': ANY(str)}, {'score': ANY(float), 'answer': ANY(str)}])

        outputs = vqa_pipeline({'image': image, 'question': question}, top_k=2)
        self.assertEqual(
            outputs, [{'score': ANY(float), 'answer': ANY(str)}, {'score': ANY(float), 'answer': ANY(str)}])

    @slow
    @require_torch
    def test_large_model_pt(self):
        """Regression test against the real finetuned checkpoint's expected answers."""
        vqa_pipeline = pipeline('visual-question-answering', model='dandelin/vilt-b32-finetuned-vqa')
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        question = 'How many cats are there?'

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{'score': 0.8_7_9_9, 'answer': '2'}, {'score': 0.2_9_6, 'answer': '1'}])

        outputs = vqa_pipeline({'image': image, 'question': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{'score': 0.8_7_9_9, 'answer': '2'}, {'score': 0.2_9_6, 'answer': '1'}])

        outputs = vqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [[{'score': 0.8_7_9_9, 'answer': '2'}, {'score': 0.2_9_6, 'answer': '1'}]] * 2, )

    @require_tf
    @unittest.skip('Visual question answering not implemented in TF')
    def test_small_model_tf(self):
        pass
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
    from apex import amp

# Native AMP (torch.cuda.amp) is available from PyTorch 1.6 onwards.
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pre-train."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args, training_args):
    """Set up stdout logging: DEBUG when verbose, INFO on the main process, else WARNING."""
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to use for pre-training."""

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    """Collate raw speech features into padded batches and sample the masked
    time indices required by Wav2Vec2's contrastive pre-training objective.
    """

    # Annotations are strings to avoid importing the model classes at field-eval time;
    # the real types are WavaVecaForPreTraining / WavaVecaFeatureExtractor.
    model: "WavaVecaForPreTraining"
    feature_extractor: "WavaVecaFeatureExtractor"
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # Pad input_values to a uniform length within the batch.
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )

        return batch
class WavaVecaPreTrainer(Trainer):
    """Trainer subclass for Wav2Vec2 pre-training that anneals the Gumbel
    softmax temperature after every update step.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Run one training step (forward, loss scaling, backward) and decay the Gumbel temperature."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    """Pre-train a Wav2Vec2 model on an audio dataset loaded from the Hub."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], batch["sampling_rate"] = librosa.load(
            batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate
        )
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)

    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)

    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
    # Entry point when launched as a script.
    main()
| 659 | 0 |
"""simple docstring"""
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz from ``number`` up to and including ``iterations``.

    Multiples of 3 become "Fizz", multiples of 5 become "Buzz", multiples of
    both become "FizzBuzz"; each entry is followed by a space.

    >>> fizz_buzz(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '
    """
    if not isinstance(iterations, int):
        raise ValueError("""iterations must be defined as integers""")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            """starting number must be
            and integer and be more than 0""")
    if not iterations >= 1:
        raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out


# Backward-compatible alias for the previous (obfuscated) name.
__lowerCAmelCase = fizz_buzz
if __name__ == "__main__":
    # Run the doctests embedded in this module.
    import doctest
    doctest.testmod()
| 58 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc, x_start, x_end, steps=100):
    """Approximate the (unsigned) area between ``fnc`` and the x-axis on [x_start, x_end].

    Splits the interval into ``steps`` segments, treats the curve as linear
    on each segment, and sums the trapezoid areas (absolute values, so the
    result is never negative).
    """
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
if __name__ == "__main__":

    def f(x):
        """Demo curve: cubic plus quadratic."""
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
| 659 | 0 |
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Numerically stable softmax over the last axis of ``outputs``.

    Subtracting the per-row maximum before exponentiating avoids overflow
    without changing the result.
    """
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class _SCREAMING_SNAKE_CASE ( Pipeline ):
    """Text-pair classification pipeline.

    Feeds one text (optionally paired with a second) through the model and
    returns the best label with its softmax score plus the raw logits.
    """

    def _sanitize_parameters(self, **kwargs):
        # Route the optional "second_text" kwarg to preprocess(); no forward
        # or postprocess parameters are supported.
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 59 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
    """Fast (CPU, dummy-weights) tests for the Stable Diffusion LDM3D pipeline,
    which generates an RGB image together with a depth map.

    NOTE(review): automated identifier renaming appears to have damaged this
    class: the four class attributes all rebind the single name
    ``UpperCamelCase_`` (each assignment overwrites the previous one), every
    method shares the name ``UpperCAmelCase_`` (only the last definition
    survives on the class), one signature repeats a parameter name (a
    SyntaxError), and method bodies bind results to ``lowerCAmelCase_`` while
    later lines read names that are never defined (``unet``, ``scheduler``,
    ``vae``, ``components``, ``generator``, ``inputs``, ``ldmad_pipe``,
    ``output``, ``rgb``, ``depth``, ...).  Compare with the original diffusers
    test module before trusting behavior.
    """
    # NOTE(review): presumably pipeline_class / params / batch_params / image_params.
    UpperCamelCase_ = StableDiffusionLDMaDPipeline
    UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS
    UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
    UpperCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS

    def UpperCAmelCase_ ( self : Tuple ) -> str:
        '''Build tiny dummy pipeline components (UNet, DDIM scheduler, VAE, CLIP text encoder/tokenizer).'''
        torch.manual_seed(0 )  # deterministic weight init
        lowerCAmelCase_ : Optional[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,)
        # NOTE(review): ``lowerCAmelCase__`` is not defined in this scope —
        # presumably literal booleans for clip_sample / set_alpha_to_one.
        lowerCAmelCase_ : Any = DDIMScheduler(
            beta_start=0.00_085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=lowerCAmelCase__ ,set_alpha_to_one=lowerCAmelCase__ ,)
        torch.manual_seed(0 )
        # 6-channel VAE: presumably 3 RGB + 3 depth channels — TODO confirm.
        lowerCAmelCase_ : str = AutoencoderKL(
            block_out_channels=[32, 64] ,in_channels=6 ,out_channels=6 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
        torch.manual_seed(0 )
        lowerCAmelCase_ : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,)
        lowerCAmelCase_ : Optional[int] = CLIPTextModel(lowerCAmelCase__ )
        lowerCAmelCase_ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        lowerCAmelCase_ : List[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : List[str]=0 ) -> Dict:
        '''Standard generation kwargs with a seeded generator (mps needs a CPU-side seed).'''
        if str(lowerCAmelCase__ ).startswith("mps" ):
            lowerCAmelCase_ : Optional[int] = torch.manual_seed(lowerCAmelCase__ )
        else:
            lowerCAmelCase_ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
        lowerCAmelCase_ : str = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
        '''Smoke test: the pipeline returns RGB + depth of the expected shapes and values.'''
        lowerCAmelCase_ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase_ : List[str] = self.get_dummy_components()
        lowerCAmelCase_ : Union[str, Any] = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ )
        lowerCAmelCase_ : List[Any] = ldmad_pipe.to(lowerCAmelCase__ )
        ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        lowerCAmelCase_ : Any = self.get_dummy_inputs(lowerCAmelCase__ )
        lowerCAmelCase_ : Union[str, Any] = ldmad_pipe(**lowerCAmelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ : Any = output.rgb, output.depth
        lowerCAmelCase_ : Dict = rgb[0, -3:, -3:, -1]
        lowerCAmelCase_ : Tuple = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        # Reference values for the fixed seed above.
        lowerCAmelCase_ : Optional[Any] = np.array(
            [0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] )
        lowerCAmelCase_ : Tuple = np.array([103.46_727, 85.812_004, 87.849_236] )
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2

    def UpperCAmelCase_ ( self : int ) -> Optional[int]:
        '''Passing pre-computed prompt embeddings must match passing the raw prompt.'''
        lowerCAmelCase_ : Dict = self.get_dummy_components()
        lowerCAmelCase_ : List[str] = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ )
        lowerCAmelCase_ : List[Any] = ldmad_pipe.to(lowerCAmelCase__ )
        ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        lowerCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
        lowerCAmelCase_ : str = 3 * [inputs["prompt"]]
        # forward with raw prompts
        lowerCAmelCase_ : Union[str, Any] = ldmad_pipe(**lowerCAmelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = output.rgb, output.depth
        lowerCAmelCase_ : str = rgb_slice_a[0, -3:, -3:, -1]
        lowerCAmelCase_ : List[str] = depth_slice_a[0, -3:, -1]
        lowerCAmelCase_ : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ )
        lowerCAmelCase_ : Tuple = 3 * [inputs.pop("prompt" )]
        # Encode the same prompts manually through the tokenizer/text encoder.
        lowerCAmelCase_ : str = ldmad_pipe.tokenizer(
            lowerCAmelCase__ ,padding="max_length" ,max_length=ldmad_pipe.tokenizer.model_max_length ,truncation=lowerCAmelCase__ ,return_tensors="pt" ,)
        lowerCAmelCase_ : Union[str, Any] = text_inputs["input_ids"].to(lowerCAmelCase__ )
        lowerCAmelCase_ : Optional[int] = ldmad_pipe.text_encoder(lowerCAmelCase__ )[0]
        lowerCAmelCase_ : Optional[int] = prompt_embeds
        # forward with prompt embeddings
        lowerCAmelCase_ : str = ldmad_pipe(**lowerCAmelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ : str = output.rgb, output.depth
        lowerCAmelCase_ : Optional[Any] = rgb_slice_a[0, -3:, -3:, -1]
        lowerCAmelCase_ : Tuple = depth_slice_a[0, -3:, -1]
        # NOTE(review): both sides of these comparisons reference the same
        # (renamed-away) name — originally slice_1 vs slice_2; confirm upstream.
        assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4
        assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
        '''Negative-prompt path with a PNDM scheduler still produces the reference values.'''
        lowerCAmelCase_ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase_ : Optional[int] = self.get_dummy_components()
        lowerCAmelCase_ : Dict = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
        lowerCAmelCase_ : Union[str, Any] = StableDiffusionLDMaDPipeline(**lowerCAmelCase__ )
        lowerCAmelCase_ : Any = ldmad_pipe.to(lowerCAmelCase__ )
        ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        lowerCAmelCase_ : List[str] = self.get_dummy_inputs(lowerCAmelCase__ )
        lowerCAmelCase_ : List[Any] = "french fries"
        lowerCAmelCase_ : Optional[int] = ldmad_pipe(**lowerCAmelCase__ ,negative_prompt=lowerCAmelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = output.rgb, output.depth
        lowerCAmelCase_ : Any = rgb[0, -3:, -3:, -1]
        lowerCAmelCase_ : Tuple = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        lowerCAmelCase_ : int = np.array(
            [0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217] )
        lowerCAmelCase_ : Union[str, Any] = np.array([107.84_738, 84.62_802, 89.962_135] )
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    """Slow GPU tests for the Intel/ldm3d checkpoint at 512x512.

    NOTE(review): same automated-renaming damage as the fast test class above:
    all methods share the name ``UpperCAmelCase_`` (only the last survives),
    one signature repeats a parameter name (a SyntaxError), and bodies read
    names (``generator``, ``latents``, ``inputs``, ``ldmad_pipe``, ``output``,
    ``rgb``, ``depth``, ``rgb_slice``, ``depth_slice``) that are never bound.
    """
    def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
        '''Release GPU memory between tests (originally ``tearDown``).'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Dict="cpu" ,lowerCAmelCase__ : Union[str, Any]=torch.floataa ,lowerCAmelCase__ : List[str]=0 ) -> int:
        '''Generation kwargs with fixed latents so outputs are reproducible across devices.'''
        lowerCAmelCase_ : Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
        lowerCAmelCase_ : List[str] = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 64, 64) )
        lowerCAmelCase_ : Optional[Any] = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ ,dtype=lowerCAmelCase__ )
        lowerCAmelCase_ : Union[str, Any] = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
        '''End-to-end run against the Intel/ldm3d checkpoint; compares output slices to reference values.'''
        lowerCAmelCase_ : Optional[Any] = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" )
        lowerCAmelCase_ : List[str] = ldmad_pipe.to(lowerCAmelCase__ )
        ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        lowerCAmelCase_ : Dict = self.get_inputs(lowerCAmelCase__ )
        lowerCAmelCase_ : List[str] = ldmad_pipe(**lowerCAmelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ : Dict = output.rgb, output.depth
        lowerCAmelCase_ : List[str] = rgb[0, -3:, -3:, -1].flatten()
        # NOTE(review): the depth slice below is taken from ``rgb`` rather than
        # ``depth``; this matches the 9-element expected array but looks wrong —
        # confirm against the upstream diffusers test.
        lowerCAmelCase_ : Optional[int] = rgb[0, -3:, -1].flatten()
        assert rgb.shape == (1, 5_12, 5_12, 3)
        assert depth.shape == (1, 5_12, 5_12)
        lowerCAmelCase_ : int = np.array(
            [0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] )
        lowerCAmelCase_ : Optional[Any] = np.array(
            [0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] )
        assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    """Nightly GPU statistics tests for the ldm3d and ldm3d-4c checkpoints.

    NOTE(review): same automated-renaming damage as the classes above (shared
    method names, a duplicated parameter name in one signature, and bodies that
    read names never bound in scope).
    """
    def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
        '''Release GPU memory between tests (originally ``tearDown``).'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Dict="cpu" ,lowerCAmelCase__ : List[str]=torch.floataa ,lowerCAmelCase__ : Optional[int]=0 ) -> int:
        '''Generation kwargs with fixed latents and the full 50-step schedule.'''
        lowerCAmelCase_ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
        lowerCAmelCase_ : Tuple = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 4, 64, 64) )
        lowerCAmelCase_ : Any = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ ,dtype=lowerCAmelCase__ )
        lowerCAmelCase_ : int = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def UpperCAmelCase_ ( self : Dict ) -> int:
        '''Checks mean/std statistics of RGB and depth for the Intel/ldm3d checkpoint.'''
        lowerCAmelCase_ : List[Any] = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d" ).to(lowerCAmelCase__ )
        ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        lowerCAmelCase_ : Union[str, Any] = self.get_inputs(lowerCAmelCase__ )
        lowerCAmelCase_ : Union[str, Any] = ldmad_pipe(**lowerCAmelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ : Any = output.rgb, output.depth
        # Reference statistics for this checkpoint and seed.
        lowerCAmelCase_ : Dict = 0.495_586
        lowerCAmelCase_ : Optional[Any] = 0.33_795_515
        lowerCAmelCase_ : Any = 112.48_518
        lowerCAmelCase_ : List[Any] = 98.489_746
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
        assert np.abs(expected_depth_std - depth.std() ) < 1e-3

    def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
        '''Same statistics check for the 4-channel Intel/ldm3d-4c checkpoint (depth has a channel axis).'''
        lowerCAmelCase_ : int = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c" ).to(lowerCAmelCase__ )
        ldmad_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        lowerCAmelCase_ : str = self.get_inputs(lowerCAmelCase__ )
        lowerCAmelCase_ : Tuple = ldmad_pipe(**lowerCAmelCase__ )
        lowerCAmelCase_ , lowerCAmelCase_ : Tuple = output.rgb, output.depth
        lowerCAmelCase_ : List[str] = 0.4_194_127
        lowerCAmelCase_ : List[str] = 0.35_375_586
        lowerCAmelCase_ : str = 0.5_638_502
        lowerCAmelCase_ : Optional[Any] = 0.34_686_103
        assert rgb.shape == (1, 5_12, 5_12, 3)
        assert depth.shape == (1, 5_12, 5_12, 1)
        assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
        assert np.abs(expected_depth_std - depth.std() ) < 1e-3
| 659 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the UperNet model (standard transformers pattern).
#
# Fixes (review): the import table was bound to ``lowerCAmelCase_`` and then
# clobbered by the modeling list, while the final ``_LazyModule`` call read the
# undefined name ``_import_structure``; the ``sys.modules[__name__]``
# replacement that makes lazy loading work was also lost.
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: without it only the configuration is importable.
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real symbols directly.
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports submodules
    # on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 60 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
# Maps key fragments from the original SAM checkpoints to their Hugging Face
# equivalents (applied by substring replacement during conversion).
# NOTE(review): the conversion helper below refers to this table as
# ``KEYS_TO_MODIFY_MAPPING``; here it is bound to ``_lowercase`` — confirm the
# intended name against the original script.
_lowercase = {
    '''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
    '''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
    '''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
    '''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
    '''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
    '''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
    '''mask_downscaling.0''': '''mask_embed.conv1''',
    '''mask_downscaling.1''': '''mask_embed.layer_norm1''',
    '''mask_downscaling.3''': '''mask_embed.conv2''',
    '''mask_downscaling.4''': '''mask_embed.layer_norm2''',
    '''mask_downscaling.6''': '''mask_embed.conv3''',
    '''point_embeddings''': '''point_embed''',
    '''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
    '''image_encoder''': '''vision_encoder''',
    '''neck.0''': '''neck.conv1''',
    '''neck.1''': '''neck.layer_norm1''',
    '''neck.2''': '''neck.conv2''',
    '''neck.3''': '''neck.layer_norm2''',
    '''patch_embed.proj''': '''patch_embed.projection''',
    '''.norm''': '''.layer_norm''',
    '''blocks''': '''layers''',
}
def UpperCamelCase ( snake_case__ , key_mapping=None):
    """Rename the keys of a SAM checkpoint state dict to the HF naming scheme.

    Args:
        snake_case__: the original checkpoint state dict (mutated: the
            ``pixel_mean``/``pixel_std`` buffers are popped).
        key_mapping: optional substring-replacement table; defaults to the
            module-level ``_lowercase`` mapping above.

    Returns:
        A new dict with renamed keys, plus a ``shared_image_embedding``
        duplicate of the prompt encoder's positional embedding.

    Fixes (review): the original built an empty dict but never populated it
    (each renamed value was bound to a throwaway name), read the undefined
    names ``KEYS_TO_MODIFY_MAPPING``/``model_state_dict``, and passed the
    state dict itself as the ``pop`` default instead of ``None``.
    """
    if key_mapping is None:
        key_mapping = _lowercase
    model_state_dict = {}
    # These normalization buffers are not model parameters in the HF layout.
    snake_case__.pop("pixel_mean" , None)
    snake_case__.pop("pixel_std" , None)
    output_hypernetworks_mlps_pattern = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in snake_case__.items():
        for key_to_modify, new_key in key_mapping.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key)
        match = re.match(output_hypernetworks_mlps_pattern , key)
        if match:
            # The 3-layer MLP becomes proj_in / layers.0 / proj_out in HF.
            layer_nb = int(match.group(2))
            if layer_nb == 0:
                key = key.replace("layers.0" , "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1" , "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2" , "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__="ybelkada/segment-anything"):
    # Convert a SAM checkpoint from the hub repo into an HF SamModel, then
    # sanity-check IoU scores on a sample image (originally
    # ``convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id)``).
    #
    # NOTE(review): automated renaming has damaged this function: all four
    # parameters share the name ``snake_case__`` (a SyntaxError), results are
    # bound to ``lowerCAmelCase_`` while later lines read undefined names
    # (``model_name``, ``config``, ``state_dict``, ``processor``, ``hf_model``,
    # ``input_points``, ``input_labels``, ``inputs``, ``output``, ``scores``),
    # and ``replace_keys`` is not defined under that name in this module.
    # Requires a CUDA device.  Compare with the original conversion script
    # before use.
    lowerCAmelCase_ : Optional[int] = hf_hub_download(snake_case__ , F'''checkpoints/{model_name}.pth''')
    # Pick the vision config matching the checkpoint size (b/l/h).
    if "sam_vit_b" in model_name:
        lowerCAmelCase_ : Optional[Any] = SamConfig()
    elif "sam_vit_l" in model_name:
        lowerCAmelCase_ : Optional[int] = SamVisionConfig(
            hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
        lowerCAmelCase_ : Union[str, Any] = SamConfig(
            vision_config=snake_case__ , )
    elif "sam_vit_h" in model_name:
        lowerCAmelCase_ : Optional[Any] = SamVisionConfig(
            hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
        lowerCAmelCase_ : Tuple = SamConfig(
            vision_config=snake_case__ , )
    lowerCAmelCase_ : Optional[Any] = torch.load(snake_case__ , map_location="cpu")
    lowerCAmelCase_ : Union[str, Any] = replace_keys(snake_case__)
    lowerCAmelCase_ : List[Any] = SamImageProcessor()
    lowerCAmelCase_ : Any = SamProcessor(image_processor=snake_case__)
    lowerCAmelCase_ : Any = SamModel(snake_case__)
    hf_model.load_state_dict(snake_case__)
    lowerCAmelCase_ : Dict = hf_model.to("cuda")
    # Regression checks against known IoU scores for the sample car image.
    lowerCAmelCase_ : List[str] = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    lowerCAmelCase_ : List[Any] = Image.open(requests.get(snake_case__ , stream=snake_case__).raw).convert("RGB")
    lowerCAmelCase_ : Optional[int] = [[[4_00, 6_50]]]
    lowerCAmelCase_ : int = [[1]]
    lowerCAmelCase_ : Optional[Any] = processor(images=np.array(snake_case__) , return_tensors="pt").to("cuda")
    with torch.no_grad():
        lowerCAmelCase_ : Optional[Any] = hf_model(**snake_case__)
    lowerCAmelCase_ : Optional[int] = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_890_251_159_668
        # Same image with an explicit point prompt.
        lowerCAmelCase_ : Any = processor(
            images=np.array(snake_case__) , input_points=snake_case__ , input_labels=snake_case__ , return_tensors="pt").to("cuda")
        with torch.no_grad():
            lowerCAmelCase_ : Optional[Any] = hf_model(**snake_case__)
        lowerCAmelCase_ : Union[str, Any] = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_712_603_092_193_604
        # Box prompt.
        lowerCAmelCase_ : Tuple = ((75, 2_75, 17_25, 8_50),)
        lowerCAmelCase_ : Optional[Any] = processor(images=np.array(snake_case__) , input_boxes=snake_case__ , return_tensors="pt").to("cuda")
        with torch.no_grad():
            lowerCAmelCase_ : List[Any] = hf_model(**snake_case__)
        lowerCAmelCase_ : str = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8_686_015_605_926_514
        # Test with 2 points and 1 image.
        lowerCAmelCase_ : int = [[[4_00, 6_50], [8_00, 6_50]]]
        lowerCAmelCase_ : Optional[Any] = [[1, 1]]
        lowerCAmelCase_ : List[Any] = processor(
            images=np.array(snake_case__) , input_points=snake_case__ , input_labels=snake_case__ , return_tensors="pt").to("cuda")
        with torch.no_grad():
            lowerCAmelCase_ : Tuple = hf_model(**snake_case__)
        lowerCAmelCase_ : str = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
    # CLI entry point for the SAM checkpoint conversion.
    #
    # Fixes (review): the parser, the choices list and the parsed args were all
    # bound to the single name ``_lowercase`` (each assignment clobbering the
    # previous one) while the code below read ``parser``/``choices``/``args``,
    # and the final call targeted the undefined name ``convert_sam_checkpoint``
    # instead of the conversion function defined above.
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        # NOTE(review): restricting the hub id to the model-name choices looks
        # odd but matches the original script.
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    args = parser.parse_args()

    # ``UpperCamelCase`` resolves to the conversion entry point defined above
    # (originally ``convert_sam_checkpoint``).
    UpperCamelCase(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 659 | 0 |
from __future__ import annotations
def _A ( lowerCAmelCase_ : list , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int ):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ , lowerCAmelCase__ = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
lowerCAmelCase__ = result + left + right
return input_list
def _A ( lowerCAmelCase_ : list ):
"""simple docstring"""
if len(lowerCAmelCase_ ) <= 1:
return input_list
lowerCAmelCase__ = list(lowerCAmelCase_ )
# iteration for two-way merging
lowerCAmelCase__ = 2
while p <= len(lowerCAmelCase_ ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(lowerCAmelCase_ ) , lowerCAmelCase_ ):
lowerCAmelCase__ = i
lowerCAmelCase__ = i + p - 1
lowerCAmelCase__ = (low + high + 1) // 2
lowerCAmelCase__ = merge(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# final merge of last two parts
if p * 2 >= len(lowerCAmelCase_ ):
lowerCAmelCase__ = i
lowerCAmelCase__ = merge(lowerCAmelCase_ , 0 , lowerCAmelCase_ , len(lowerCAmelCase_ ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
    # Read comma-separated integers from stdin and print them sorted.
    #
    # Fixes (review): the input string and the parsed list were both bound to
    # ``UpperCamelCase`` while the code read ``user_input``/``unsorted``, and
    # the final call targeted the undefined name ``iter_merge_sort``.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
    # ``_A`` resolves to the iterative merge sort defined above.
    print(_A(unsorted))
| 61 |
class __snake_case :
    """A node of a radix (compressed prefix) tree over strings.

    Each edge stores a whole string fragment (``prefix``); children are keyed
    by the first character of their prefix.  A word is "in" the tree when the
    path spelling it ends at a node with ``is_leaf`` set.

    Fixes (review): every method of the original shared the name
    ``UpperCAmelCase_`` (so only the last survived on the class), missing-child
    lookups used the word itself as the ``dict.get`` default instead of
    ``None``, new leaves were created with ``is_leaf=word`` instead of
    ``True``, and the class body/callers referenced the undefined name
    ``RadixNode`` — an alias is provided after the class.
    """

    def __init__( self , prefix: str = "" , is_leaf: bool = False ) -> None:
        # Children keyed by the first character of their prefix.
        self.nodes: dict[str, "__snake_case"] = {}
        # A node will be a leaf if the tree contains its word.
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match( self , word: str ) -> tuple[str, str, str]:
        """Return (common prefix, leftover node prefix, leftover word)."""
        common = 0
        for ours, theirs in zip(self.prefix , word):
            if ours != theirs:
                break
            common += 1
        return self.prefix[:common], self.prefix[common:], word[common:]

    def insert_many( self , words: list[str] ) -> None:
        """Insert every word in ``words``."""
        for word in words:
            self.insert(word)

    def insert( self , word: str ) -> None:
        """Insert ``word`` below this node."""
        # Case 1: the word equals this node's prefix — mark it as contained.
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: no outgoing edge shares the word's first character —
        # attach the whole word as a new leaf.
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word , is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: the child's prefix is fully matched — recurse with the rest.
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: partial match — split the edge: insert an intermediate
            # node holding the common part, re-hang the old child under it.
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string , False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find( self , word: str ) -> bool:
        """Return True if ``word`` is stored in the tree."""
        incoming_node = self.nodes.get(word[0] , None)
        if not incoming_node:
            return False
        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        # Leftover node prefix means the word diverges inside an edge.
        if remaining_prefix != "":
            return False
        # Word fully consumed: contained iff the node is a leaf.
        if remaining_word == "":
            return incoming_node.is_leaf
        # Otherwise keep walking with the remaining suffix.
        return incoming_node.find(remaining_word)

    def delete( self , word: str ) -> bool:
        """Remove ``word`` from the tree; return True if it was present."""
        incoming_node = self.nodes.get(word[0] , None)
        if not incoming_node:
            return False
        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        if remaining_prefix != "":
            return False
        if remaining_word != "":
            return incoming_node.delete(remaining_word)
        if not incoming_node.is_leaf:
            # The path exists but the word itself was never inserted.
            return False
        if len(incoming_node.nodes) == 0:
            # Childless leaf: drop the edge entirely.
            del self.nodes[word[0]]
            # If this node is now a non-leaf with a single child, merge them.
            if len(self.nodes) == 1 and not self.is_leaf:
                merging_node = list(self.nodes.values())[0]
                self.is_leaf = merging_node.is_leaf
                self.prefix += merging_node.prefix
                self.nodes = merging_node.nodes
        elif len(incoming_node.nodes) > 1:
            # Several children remain: just unmark the leaf flag.
            incoming_node.is_leaf = False
        else:
            # Exactly one child: merge it into the removed word's node.
            merging_node = list(incoming_node.nodes.values())[0]
            incoming_node.is_leaf = merging_node.is_leaf
            incoming_node.prefix += merging_node.prefix
            incoming_node.nodes = merging_node.nodes
        return True

    def print_tree( self , height: int = 0 ) -> None:
        """Pretty-print the subtree, one dash per depth level."""
        if self.prefix != "":
            print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)


# Callers in this module (and the class body above) use the name ``RadixNode``.
RadixNode = __snake_case
def UpperCamelCase ( ):
    """Self-test for the radix tree: insert a word list, then exercise
    find/delete behavior (originally ``test_trie``).

    Returns:
        True when every check passes (the checks themselves are asserts).

    Fixes (review): the original bound the word list and the root node to
    throwaway names and then read the undefined names ``words``/``root``.
    """
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")

    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def UpperCamelCase ( ):
    # NOTE(review): ``test_trie`` is not defined in this module — the
    # self-test above was renamed to ``UpperCamelCase`` and is shadowed by
    # this very definition, so calling this raises NameError.  Confirm the
    # intended target against the original file before use.
    assert test_trie()
def UpperCamelCase ( ):
    """Demo: build a radix tree from a word list and pretty-print it
    (originally ``main``).

    Fixes (review): the original bound the root node and the word list to
    throwaway names and then read the undefined names ``root``/``words``,
    and printed an undefined ``snake_case__``.
    """
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:" , words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
    # ``UpperCamelCase`` resolves to the demo defined immediately above
    # (originally ``main``); the original called the undefined name ``main``.
    UpperCamelCase()
| 659 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
snake_case = logging.getLogger(__name__)
def lowerCamelCase__ ( lowercase , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = False , ):
    # Quantize a model with bitsandbytes and dispatch it across devices
    # (mirrors accelerate's ``load_and_quantize_model``).
    #
    # NOTE(review): automated renaming has damaged this function: every
    # positional parameter shares the name ``lowercase``, both 8-bit and 4-bit
    # flags are bound to the single name ``load_in_abit`` (and the imports
    # above bring in ``is_abit_bnb_available`` twice), and results are bound
    # to ``SCREAMING_SNAKE_CASE`` while later lines read undefined names
    # (``bnb_quantization_config``, ``device_map``, ``model``,
    # ``modules_to_not_convert``, ``keep_in_fpaa_modules``, ``model_device``,
    # ``param``, ``weights_location``, ``offload``, ...).  Compare with the
    # original accelerate source before use.
    """simple docstring"""
    SCREAMING_SNAKE_CASE : int = bnb_quantization_config.load_in_abit
    SCREAMING_SNAKE_CASE : List[Any] = bnb_quantization_config.load_in_abit
    if load_in_abit and not is_abit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed." )
    if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed." )
    SCREAMING_SNAKE_CASE : Tuple = []
    # custom device map
    if isinstance(lowercase , lowercase ) and len(device_map.keys() ) > 1:
        SCREAMING_SNAKE_CASE : int = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        SCREAMING_SNAKE_CASE : Optional[Any] = get_keys_to_not_convert(lowercase )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_abit:
        bnb_quantization_config.skip_modules.extend(lowercase )
    SCREAMING_SNAKE_CASE : Optional[int] = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        SCREAMING_SNAKE_CASE : int = []
    SCREAMING_SNAKE_CASE : int = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(lowercase )
    # compatibility with peft
    SCREAMING_SNAKE_CASE : int = load_in_abit
    SCREAMING_SNAKE_CASE : str = load_in_abit
    SCREAMING_SNAKE_CASE : List[str] = get_parameter_device(lowercase )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager." )
        SCREAMING_SNAKE_CASE : Any = replace_with_bnb_layers(lowercase , lowercase , modules_to_not_convert=lowercase )
        # convert param to the right dtype
        SCREAMING_SNAKE_CASE : Optional[int] = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
                param.to(torch.floataa )
                if param.dtype != torch.floataa:
                    SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace(".weight" , "" ).replace(".bias" , "" )
                    SCREAMING_SNAKE_CASE : Optional[int] = getattr(lowercase , lowercase , lowercase )
                    if param is not None:
                        param.to(torch.floataa )
            elif torch.is_floating_point(lowercase ):
                param.to(lowercase )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization." )
        logger.info(
            F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
            "We move the model to cuda." )
        return model
    elif weights_location is None:
        raise RuntimeError(
            F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
    else:
        with init_empty_weights():
            SCREAMING_SNAKE_CASE : str = replace_with_bnb_layers(
                lowercase , lowercase , modules_to_not_convert=lowercase )
        SCREAMING_SNAKE_CASE : Optional[int] = get_quantized_model_device_map(
            lowercase , lowercase , lowercase , max_memory=lowercase , no_split_module_classes=lowercase , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            SCREAMING_SNAKE_CASE : List[str] = True
        SCREAMING_SNAKE_CASE : Optional[Any] = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
        load_checkpoint_in_model(
            lowercase , lowercase , lowercase , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowercase , offload_state_dict=lowercase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
    return dispatch_model(lowercase , device_map=lowercase , offload_dir=lowercase )
def lowerCamelCase__ ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=None ):
    # Resolve/validate a device map for a quantized model (mirrors accelerate's
    # ``get_quantized_model_device_map``).
    #
    # NOTE(review): same renaming damage as above — all parameters share the
    # name ``lowercase`` and later lines read undefined names (``device_map``,
    # ``model``, ``bnb_quantization_config``, ``special_dtypes``, ``kwargs``,
    # ``max_memory``, ``no_split_module_classes``).  Compare with the original
    # accelerate source before use.
    """simple docstring"""
    if device_map is None:
        if torch.cuda.is_available():
            SCREAMING_SNAKE_CASE : Optional[Any] = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization." )
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." )
    if isinstance(lowercase , lowercase ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'." )
        # Modules skipped from quantization keep the configured torch dtype;
        # full-precision modules are pinned to float.
        SCREAMING_SNAKE_CASE : Tuple = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.floataa
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
            } )
        SCREAMING_SNAKE_CASE : Optional[Any] = {}
        SCREAMING_SNAKE_CASE : Optional[Any] = special_dtypes
        SCREAMING_SNAKE_CASE : List[str] = no_split_module_classes
        SCREAMING_SNAKE_CASE : str = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            SCREAMING_SNAKE_CASE : List[Any] = get_balanced_memory(
                lowercase , low_zero=(device_map == "balanced_low_0") , max_memory=lowercase , **lowercase , )
        SCREAMING_SNAKE_CASE : int = max_memory
        SCREAMING_SNAKE_CASE : Tuple = infer_auto_device_map(lowercase , **lowercase )
    if isinstance(lowercase , lowercase ):
        # check if don't have any quantized module on the cpu
        SCREAMING_SNAKE_CASE : List[str] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
        SCREAMING_SNAKE_CASE : List[Any] = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    raise ValueError(
                        "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
                else:
                    logger.info(
                        "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
        del device_map_without_some_modules
    return device_map
def lowerCamelCase__ ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ):
    """Replace every eligible ``nn.Linear`` in ``model`` with a bitsandbytes layer.

    Bug fix: the original signature reused the name ``lowercase`` for all four
    parameters, which is a SyntaxError in Python; the names below were
    reconstructed from how the body uses them.

    Args:
        model: the model whose linear layers should be swapped in place.
        bnb_quantization_config: quantization settings forwarded to the
            recursive worker.
        modules_to_not_convert: optional list of module names to leave as-is.
        current_key_name: recursion accumulator of parent module names.

    Returns:
        The (mutated) model. A warning is logged when nothing was replaced.
    """
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
    if not has_been_replaced:
        # Architectures such as GPT-2 use Conv1D instead of nn.Linear, so no
        # replacement having happened is suspicious but not fatal.
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug." )
    return model
def lowerCamelCase__ ( lowercase , lowercase , lowercase=None , lowercase=None , ):
    """Recursively swap ``nn.Linear`` children for bitsandbytes quantized layers.

    NOTE(review): every parameter is named ``lowercase`` (duplicate parameter
    names are a SyntaxError), and most local references were flattened to
    ``lowercase`` as well. From the body's reads, the intended parameters
    appear to be (model, bnb_quantization_config, modules_to_not_convert,
    current_key_name) — confirm against the upstream source. Returns a
    ``(model, has_been_replaced)`` tuple.
    """
    # Tracks whether at least one Linear was replaced at this level or below.
    SCREAMING_SNAKE_CASE : Optional[Any] = False
    for name, module in model.named_children():
        if current_key_name is None:
            SCREAMING_SNAKE_CASE : Any = []
        current_key_name.append(lowercase )
        if isinstance(lowercase , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            SCREAMING_SNAKE_CASE : Dict = ".".join(lowercase )
            SCREAMING_SNAKE_CASE : Optional[int] = True
            # Skip the module if any excluded key matches the dotted path.
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    SCREAMING_SNAKE_CASE : Union[str, Any] = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                # NOTE(review): both branches test `load_in_abit`; upstream this
                # distinguishes 8-bit vs 4-bit loading — verify before use.
                if bnb_quantization_config.load_in_abit:
                    SCREAMING_SNAKE_CASE : Optional[int] = bnb.nn.LinearabitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=lowercase , threshold=bnb_quantization_config.llm_inta_threshold , )
                elif bnb_quantization_config.load_in_abit:
                    SCREAMING_SNAKE_CASE : int = bnb.nn.Linearabit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False" )
                # Copy over the original weights/bias into the bnb replacement.
                SCREAMING_SNAKE_CASE : int = module.weight.data
                if module.bias is not None:
                    SCREAMING_SNAKE_CASE : List[str] = module.bias.data
                bnb_module.requires_grad_(lowercase )
                setattr(lowercase , lowercase , lowercase )
                SCREAMING_SNAKE_CASE : Any = True
        if len(list(module.children() ) ) > 0:
            # Recurse into composite modules and merge the replacement flag.
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = _replace_with_bnb_layers(
                lowercase , lowercase , lowercase , lowercase )
            SCREAMING_SNAKE_CASE : Tuple = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def lowerCamelCase__ ( lowercase ):
    """Return module names that must stay unquantized (tied weights + head).

    Bug fixes: the garbled original computed ``set(lowercase ) - set(lowercase )``
    (always the empty set) and discarded the result of ``name.replace`` —
    locals were restored so the set arithmetic and suffix stripping operate on
    the intended values.

    Args:
        lowercase: the model to analyse.

    Returns:
        List of module-name prefixes (``.weight``/``.bias`` suffixes stripped)
        that should be kept in full precision; ``[]`` for an untied base model.
    """
    # Copy on the meta device so weight tying can be inspected at zero cost.
    with init_empty_weights():
        tied_model = deepcopy(lowercase )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(lowercase , "base_model_prefix" ):
        is_base_model = not hasattr(lowercase , lowercase.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(lowercase.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , "" )
        filtered_module_names.append(name )
    return filtered_module_names
def lowerCamelCase__ ( model ):
    """Return True if any submodule of ``model`` is a bnb quantized linear layer.

    Bug fix: the original iterated ``model.modules()`` while the parameter was
    named ``lowercase`` (NameError) and ran ``isinstance`` on an undefined name
    instead of the loop variable ``m``.
    """
    for m in model.modules():
        if isinstance(m , bnb.nn.Linearabit ):
            return True
    return False
def lowerCamelCase__ ( parameter ):
    """Return the device of the first parameter of ``parameter`` (an nn.Module).

    Bug fix: the body read ``parameter`` while the argument was named
    ``lowercase``, so every call raised NameError; the parameter was renamed
    to match the body.

    Raises:
        StopIteration: if the module has no parameters.
    """
    return next(parameter.parameters() ).device
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
    """Offload one (possibly quantized) weight tensor and replace it with meta.

    NOTE(review): all seven parameters are named ``lowercase`` (duplicate
    parameter names are a SyntaxError) and several locals/reads below
    (``fpaa_statistics``, ``param_name``, ``model``, ``tensor_name``,
    ``param``) are never bound in this garbled copy. The intent, judging from
    the calls, is: resolve the dotted ``param_name`` to the owning module,
    offload the tensor (and its ``SCB`` int8 statistics when present) to disk
    via ``offload_weight``, then point the module's tensor at the meta device.
    Confirm the exact parameter order against the upstream source.
    """
    if fpaa_statistics is None:
        set_module_tensor_to_device(lowercase , lowercase , 0 , dtype=lowercase , value=lowercase )
    SCREAMING_SNAKE_CASE : str = param_name
    SCREAMING_SNAKE_CASE : List[str] = model
    if "." in tensor_name:
        # Walk the dotted path down to the module that owns the tensor.
        SCREAMING_SNAKE_CASE : Dict = tensor_name.split("." )
        for split in splits[:-1]:
            SCREAMING_SNAKE_CASE : Tuple = getattr(lowercase , lowercase )
            if new_module is None:
                raise ValueError(F'''{module} has no attribute {split}.''' )
            SCREAMING_SNAKE_CASE : Dict = new_module
        SCREAMING_SNAKE_CASE : List[str] = splits[-1]
    # offload weights
    SCREAMING_SNAKE_CASE : Tuple = False
    offload_weight(module._parameters[tensor_name] , lowercase , lowercase , index=lowercase )
    if hasattr(module._parameters[tensor_name] , "SCB" ):
        # Int8 weights carry SCB scaling statistics that must be offloaded too.
        offload_weight(
            module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , lowercase , index=lowercase , )
    else:
        offload_weight(lowercase , lowercase , lowercase , index=lowercase )
        offload_weight(lowercase , param_name.replace("weight" , "SCB" ) , lowercase , index=lowercase )
    # Free the real storage: keep only an empty meta tensor of the same size.
    set_module_tensor_to_device(lowercase , lowercase , "meta" , dtype=lowercase , value=torch.empty(*param.size() ) )
| 62 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
    """Test-helper that builds a small BlipText config plus dummy inputs.

    NOTE(review): in this garbled copy the ``__init__`` signature repeats the
    name ``lowerCAmelCase__`` for every parameter (duplicate parameter names
    are a SyntaxError), and the attribute assignments all target
    ``lowerCAmelCase_`` — the intended attribute names can be read off the
    right-hand sides (parent, batch_size, seq_length, …).
    """
    def __init__( self : Tuple ,lowerCAmelCase__ : List[str] ,lowerCAmelCase__ : Optional[Any]=12 ,lowerCAmelCase__ : Union[str, Any]=7 ,lowerCAmelCase__ : Union[str, Any]=True ,lowerCAmelCase__ : List[str]=True ,lowerCAmelCase__ : Any=True ,lowerCAmelCase__ : Optional[Any]=99 ,lowerCAmelCase__ : List[str]=32 ,lowerCAmelCase__ : Dict=32 ,lowerCAmelCase__ : str=2 ,lowerCAmelCase__ : Optional[int]=4 ,lowerCAmelCase__ : str=37 ,lowerCAmelCase__ : Dict=0.1 ,lowerCAmelCase__ : List[str]=0.1 ,lowerCAmelCase__ : str=5_12 ,lowerCAmelCase__ : Union[str, Any]=0.02 ,lowerCAmelCase__ : Tuple=0 ,lowerCAmelCase__ : str=None ,) -> str:
        '''Store the hyper-parameters used to build configs and inputs.'''
        lowerCAmelCase_ : int = parent
        lowerCAmelCase_ : str = batch_size
        lowerCAmelCase_ : int = seq_length
        lowerCAmelCase_ : Union[str, Any] = is_training
        lowerCAmelCase_ : int = use_input_mask
        lowerCAmelCase_ : List[Any] = use_labels
        lowerCAmelCase_ : Dict = vocab_size
        lowerCAmelCase_ : Union[str, Any] = hidden_size
        lowerCAmelCase_ : Union[str, Any] = projection_dim
        lowerCAmelCase_ : List[Any] = num_hidden_layers
        lowerCAmelCase_ : Any = num_attention_heads
        lowerCAmelCase_ : List[Any] = intermediate_size
        lowerCAmelCase_ : Any = dropout
        lowerCAmelCase_ : Optional[int] = attention_dropout
        lowerCAmelCase_ : int = max_position_embeddings
        lowerCAmelCase_ : Optional[int] = initializer_range
        lowerCAmelCase_ : Any = scope
        lowerCAmelCase_ : Tuple = bos_token_id
    def UpperCAmelCase_ ( self : str ) -> Tuple:
        '''Create random input ids, an optional attention mask, and a config.

        NOTE(review): the unpack of ``input_mask.shape`` and the in-place mask
        edits below were garbled — ``batch_size``/``seq_length`` are read but
        never bound here; compare with the upstream tester before trusting.
        '''
        lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        lowerCAmelCase_ : Dict = None
        if self.use_input_mask:
            lowerCAmelCase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            lowerCAmelCase_ : List[Any] = input_mask.numpy()
            lowerCAmelCase_ , lowerCAmelCase_ : str = input_mask.shape
            lowerCAmelCase_ : Dict = np.random.randint(1 ,seq_length - 1 ,size=(batch_size,) )
            for batch_idx, start_index in enumerate(lowerCAmelCase__ ):
                lowerCAmelCase_ : Union[str, Any] = 1
                lowerCAmelCase_ : Optional[Any] = 0
        lowerCAmelCase_ : List[Any] = self.get_config()
        return config, input_ids, tf.convert_to_tensor(lowerCAmelCase__ )
    def UpperCAmelCase_ ( self : List[str] ) -> str:
        '''Build a BlipTextConfig from the stored hyper-parameters.'''
        return BlipTextConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,projection_dim=self.projection_dim ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,dropout=self.dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,bos_token_id=self.bos_token_id ,)
    def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Any ,lowerCAmelCase__ : Dict ) -> List[Any]:
        '''Run TFBlipTextModel and assert the output shapes are as expected.'''
        lowerCAmelCase_ : List[Any] = TFBlipTextModel(config=lowerCAmelCase__ )
        lowerCAmelCase_ : Optional[Any] = model(lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,training=lowerCAmelCase__ )
        lowerCAmelCase_ : str = model(lowerCAmelCase__ ,training=lowerCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
    def UpperCAmelCase_ ( self : Optional[int] ) -> int:
        '''Package config and inputs into the dict form the common tests use.'''
        lowerCAmelCase_ : List[str] = self.prepare_config_and_inputs()
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Dict = config_and_inputs
        lowerCAmelCase_ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class __snake_case ( snake_case__ , unittest.TestCase ):
    """Model tests for TFBlipTextModel (config checks, forward pass, loading).

    NOTE(review): the tester instantiated below is named
    ``BlipTextModelTester`` upstream; in this garbled copy the tester class
    was renamed, so the reference may not resolve at runtime.
    """
    UpperCamelCase_ = (TFBlipTextModel,) if is_tf_available() else ()
    UpperCamelCase_ = False
    UpperCamelCase_ = False
    UpperCamelCase_ = False
    def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
        '''Set up the model tester and the shared ConfigTester.'''
        lowerCAmelCase_ : List[str] = BlipTextModelTester(self )
        lowerCAmelCase_ : Tuple = ConfigTester(self ,config_class=lowerCAmelCase__ ,hidden_size=37 )
    def UpperCAmelCase_ ( self : str ) -> Any:
        '''Run the common configuration serialization/round-trip tests.'''
        self.config_tester.run_common_tests()
    def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
        '''Exercise a full forward pass through the model.'''
        lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase__ )
    def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[Any]:
        '''Intentionally skipped for this model (no-op override).'''
        pass
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
        '''Intentionally skipped for this model (no-op override).'''
        pass
    @unittest.skip(reason="Blip does not use inputs_embeds" )
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
        '''Skipped: Blip has no inputs_embeds pathway.'''
        pass
    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
    def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
        '''Skipped: not registered in MODEL_MAPPING.'''
        pass
    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
    def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]:
        '''Skipped: not registered in MODEL_MAPPING.'''
        pass
    @slow
    def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
        '''Smoke-test loading the first pretrained checkpoint from the hub.'''
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase_ : Tuple = TFBlipTextModel.from_pretrained(lowerCAmelCase__ )
            self.assertIsNotNone(lowerCAmelCase__ )
    def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : str=True ) -> List[Any]:
        '''Delegate PT/TF equivalence to the base class, allowing missing keys.'''
        super().test_pt_tf_model_equivalence(allow_missing_keys=lowerCAmelCase__ )
| 659 | 0 |
def lowerCamelCase__ ( __lowerCamelCase : int ):
    """Return the (n-1)-th Catalan number for input ``n`` (1 -> 1, 3 -> 2, 5 -> 14).

    Bug fixes: the original called ``isinstance(x, x)`` (arg 2 must be a type,
    so every call raised TypeError), built its error messages from the
    undefined name ``number``, and raised the raw input instead of the message.

    Args:
        __lowerCamelCase: a positive integer index.

    Raises:
        TypeError: if the input is not an ``int``.
        ValueError: if the input is less than 1.
    """
    if not isinstance(__lowerCamelCase , int ):
        msg = f"""Input value of [number={__lowerCamelCase}] must be an integer"""
        raise TypeError(msg )
    if __lowerCamelCase < 1:
        msg = f"""Input value of [number={__lowerCamelCase}] must be > 0"""
        raise ValueError(msg )
    # Iterative Catalan recurrence: C_i = C_{i-1} * (4i - 2) / (i + 1),
    # kept in exact integer arithmetic (the division is always exact).
    current_number = 1
    for i in range(1 , __lowerCamelCase ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
# Run the module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 63 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# NOTE(review): every module-level constant in this garbled copy is bound to
# the same name `_lowercase`, so each assignment overwrites the previous one.
# Upstream these are distinct names (logger, the vocab-file-name map, the
# pretrained vocab-file URL map, and the positional-embedding size map) —
# confirm before relying on any of them.
_lowercase = logging.get_logger(__name__)
# File names the tokenizer expects when loading/saving a local vocabulary.
_lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
_lowercase = {
    '''vocab_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
    },
    '''merges_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
    },
    '''tokenizer_file''': {
        '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
    },
}
# Maximum sequence length supported by each pretrained checkpoint.
_lowercase = {
    '''allenai/led-base-16384''': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def UpperCamelCase ( ):
    """Return a mapping from every byte value (0-255) to a printable unicode char.

    Byte-level BPE needs each byte to map to a character that survives
    round-tripping; printable bytes map to themselves and the rest are shifted
    into the 256+ codepoint range.

    Bug fix: the garbled original assigned each intermediate to a throwaway
    name and then read the never-bound ``bs``, ``cs`` and ``n`` (NameError);
    the locals were restored.
    """
    # Bytes that already map to printable, non-whitespace characters.
    bs = (
        list(range(ord("!") , ord("~") + 1)) + list(range(ord("¡") , ord("¬") + 1)) + list(range(ord("®") , ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    # Map the remaining bytes to fresh codepoints starting at 256.
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs , cs))
def UpperCamelCase ( snake_case__):
    """Return the set of adjacent symbol pairs in a word (BPE merge candidates).

    Bug fix: the garbled original read ``word``, ``pairs`` and ``prev_char``,
    none of which were ever bound (NameError); the locals were restored to
    operate on the ``snake_case__`` argument.

    Args:
        snake_case__: a sequence of symbols (string or tuple of sub-tokens).

    Returns:
        Set of ``(previous_symbol, symbol)`` tuples.
    """
    pairs = set()
    prev_char = snake_case__[0]
    for char in snake_case__[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __snake_case ( snake_case__ ):
    """Byte-level BPE tokenizer for LED (same scheme as BART/GPT-2).

    NOTE(review): local names in this copy were mangled to ``lowerCAmelCase_``
    and parameters to ``lowerCAmelCase__``; several reads below (``bos_token``,
    ``bpe_merges``, ``self.encoder``, ``kv``, ...) refer to names that this
    garbled version never binds — compare against the upstream LED tokenizer
    before relying on runtime behavior.
    """
    UpperCamelCase_ = VOCAB_FILES_NAMES
    UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ = ['input_ids', 'attention_mask']
    def __init__( self : int ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Any ,lowerCAmelCase__ : Tuple="replace" ,lowerCAmelCase__ : Optional[int]="<s>" ,lowerCAmelCase__ : Optional[int]="</s>" ,lowerCAmelCase__ : Tuple="</s>" ,lowerCAmelCase__ : int="<s>" ,lowerCAmelCase__ : Union[str, Any]="<unk>" ,lowerCAmelCase__ : str="<pad>" ,lowerCAmelCase__ : Tuple="<mask>" ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : Tuple ,) -> Any:
        '''Load vocab/merges files, wrap special tokens, and build BPE tables.'''
        lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else bos_token
        lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else eos_token
        lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else sep_token
        lowerCAmelCase_ : Any = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else cls_token
        lowerCAmelCase_ : Tuple = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else unk_token
        lowerCAmelCase_ : Any = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ : Optional[int] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else mask_token
        super().__init__(
            errors=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
        with open(lowerCAmelCase__ ,encoding="utf-8" ) as vocab_handle:
            lowerCAmelCase_ : List[str] = json.load(lowerCAmelCase__ )
        # decoder: reverse of the token -> id vocabulary.
        lowerCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
        lowerCAmelCase_ : Optional[int] = errors # how to handle errors in decoding
        lowerCAmelCase_ : Optional[int] = bytes_to_unicode()
        lowerCAmelCase_ : str = {v: k for k, v in self.byte_encoder.items()}
        with open(lowerCAmelCase__ ,encoding="utf-8" ) as merges_handle:
            lowerCAmelCase_ : List[str] = merges_handle.read().split("\n" )[1:-1]
        lowerCAmelCase_ : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
        # bpe_ranks: merge pair -> priority (lower rank merges first).
        lowerCAmelCase_ : Union[str, Any] = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
        lowerCAmelCase_ : Dict = {}
        lowerCAmelCase_ : List[str] = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        lowerCAmelCase_ : Any = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def UpperCAmelCase_ ( self : Dict ) -> Dict:
        '''Return the size of the base vocabulary (without added tokens).'''
        return len(self.encoder )
    def UpperCAmelCase_ ( self : Dict ) -> str:
        '''Return the full vocabulary (base + added tokens) as a dict.'''
        return dict(self.encoder ,**self.added_tokens_encoder )
    def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Dict ) -> Dict:
        '''Apply byte-pair merges to one token string, memoized in self.cache.'''
        if token in self.cache:
            return self.cache[token]
        lowerCAmelCase_ : Union[str, Any] = tuple(lowerCAmelCase__ )
        lowerCAmelCase_ : str = get_pairs(lowerCAmelCase__ )
        if not pairs:
            return token
        while True:
            # Always merge the lowest-ranked (most frequent) pair first.
            lowerCAmelCase_ : Optional[int] = min(lowerCAmelCase__ ,key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ ,float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = bigram
            lowerCAmelCase_ : Tuple = []
            lowerCAmelCase_ : str = 0
            while i < len(lowerCAmelCase__ ):
                try:
                    lowerCAmelCase_ : Union[str, Any] = word.index(lowerCAmelCase__ ,lowerCAmelCase__ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowerCAmelCase_ : List[str] = j
                if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowerCAmelCase_ : Optional[int] = tuple(lowerCAmelCase__ )
            lowerCAmelCase_ : Tuple = new_word
            if len(lowerCAmelCase__ ) == 1:
                break
            else:
                lowerCAmelCase_ : Dict = get_pairs(lowerCAmelCase__ )
        lowerCAmelCase_ : Optional[Any] = " ".join(lowerCAmelCase__ )
        lowerCAmelCase_ : Optional[Any] = word
        return word
    def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Dict ) -> Optional[Any]:
        '''Split text with the GPT-2 regex and BPE-encode each piece.'''
        lowerCAmelCase_ : Any = []
        for token in re.findall(self.pat ,lowerCAmelCase__ ):
            lowerCAmelCase_ : Optional[int] = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(" " ) )
        return bpe_tokens
    def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ) -> Tuple:
        '''Map a token string to its id (falls back to the unk token id).'''
        return self.encoder.get(lowerCAmelCase__ ,self.encoder.get(self.unk_token ) )
    def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
        '''Map an id back to its token string.'''
        return self.decoder.get(lowerCAmelCase__ )
    def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[Any] ) -> Any:
        '''Join tokens and decode the byte-level representation back to text.'''
        lowerCAmelCase_ : int = "".join(lowerCAmelCase__ )
        lowerCAmelCase_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" ,errors=self.errors )
        return text
    def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
        '''Write vocab.json and merges.txt into a directory; return their paths.'''
        if not os.path.isdir(lowerCAmelCase__ ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowerCAmelCase_ : Optional[int] = os.path.join(
            lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        lowerCAmelCase_ : List[str] = os.path.join(
            lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCAmelCase__ ,ensure_ascii=lowerCAmelCase__ ) + "\n" )
        lowerCAmelCase_ : Dict = 0
        with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            # Merges are written in rank order; gaps indicate a corrupt table.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCAmelCase__ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    lowerCAmelCase_ : List[Any] = token_index
                writer.write(" ".join(lowerCAmelCase__ ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
        '''Add <s>...</s> (and pair separators) around the token id sequence(s).'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCAmelCase_ : Union[str, Any] = [self.cls_token_id]
        lowerCAmelCase_ : str = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ,lowerCAmelCase__ : bool = False ) -> List[int]:
        '''Return a 0/1 mask marking special-token positions.'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCAmelCase__ ,token_ids_a=lowerCAmelCase__ ,already_has_special_tokens=lowerCAmelCase__ )
        if token_ids_a is None:
            return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
        return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
    def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
        '''Return all-zero token type ids (LED does not use token types).'''
        lowerCAmelCase_ : Optional[int] = [self.sep_token_id]
        lowerCAmelCase_ : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : str ) -> Union[str, Any]:
        '''Optionally prepend a space so the first word BPE-merges correctly.'''
        lowerCAmelCase_ : Optional[int] = kwargs.pop("add_prefix_space" ,self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
            lowerCAmelCase_ : List[str] = " " + text
        return (text, kwargs)
    def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[bool] = None ,) -> dict:
        '''Pad as usual, then pad `global_attention_mask` with -1 to match.'''
        lowerCAmelCase_ : int = super()._pad(
            encoded_inputs=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding_strategy=lowerCAmelCase__ ,pad_to_multiple_of=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,)
        # Load from model defaults
        if return_attention_mask is None:
            lowerCAmelCase_ : List[Any] = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            lowerCAmelCase_ : Dict = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            lowerCAmelCase_ : List[Any] = len(encoded_inputs["global_attention_mask"] ) != len(lowerCAmelCase__ )
            if needs_to_be_padded:
                lowerCAmelCase_ : Union[str, Any] = len(lowerCAmelCase__ ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    lowerCAmelCase_ : Optional[int] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    lowerCAmelCase_ : List[Any] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
| 659 | 0 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _lowerCamelCase ( UpperCamelCase_ ):
    """Levit config tester: checks Levit-specific attributes exist on the config."""
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        '''Build a config from self.inputs_dict and assert its attributes.'''
        # Bug fix: the original passed the undefined name `lowerCAmelCase` to
        # hasattr instead of the freshly constructed config object.
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(config , '''num_attention_heads''' ) )
class _lowerCamelCase :
    """Test-helper that builds a small Levit config plus dummy pixel inputs.

    NOTE(review): the ``__init__`` signature repeats ``lowerCAmelCase`` for
    every parameter (duplicate parameter names are a SyntaxError) and each
    attribute assignment targets the mangled ``SCREAMING_SNAKE_CASE__`` name;
    the intended attribute names can be read off the right-hand sides.
    """
    def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=64 , lowerCAmelCase=3 , lowerCAmelCase=3 , lowerCAmelCase=2 , lowerCAmelCase=1 , lowerCAmelCase=16 , lowerCAmelCase=[128, 256, 384] , lowerCAmelCase=[4, 6, 8] , lowerCAmelCase=[2, 3, 4] , lowerCAmelCase=[16, 16, 16] , lowerCAmelCase=0 , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=0.02 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=2 , ) -> Optional[Any]:
        # Store every hyper-parameter used to build configs and inputs.
        SCREAMING_SNAKE_CASE__: Union[str, Any]= parent
        SCREAMING_SNAKE_CASE__: str= batch_size
        SCREAMING_SNAKE_CASE__: int= image_size
        SCREAMING_SNAKE_CASE__: Any= num_channels
        SCREAMING_SNAKE_CASE__: Tuple= kernel_size
        SCREAMING_SNAKE_CASE__: List[str]= stride
        SCREAMING_SNAKE_CASE__: Tuple= padding
        SCREAMING_SNAKE_CASE__: str= hidden_sizes
        SCREAMING_SNAKE_CASE__: List[Any]= num_attention_heads
        SCREAMING_SNAKE_CASE__: Any= depths
        SCREAMING_SNAKE_CASE__: Tuple= key_dim
        SCREAMING_SNAKE_CASE__: Union[str, Any]= drop_path_rate
        SCREAMING_SNAKE_CASE__: Optional[Any]= patch_size
        SCREAMING_SNAKE_CASE__: List[str]= attention_ratio
        SCREAMING_SNAKE_CASE__: List[str]= mlp_ratio
        SCREAMING_SNAKE_CASE__: Dict= initializer_range
        # Down-sampling operations between the three Levit stages.
        SCREAMING_SNAKE_CASE__: str= [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        SCREAMING_SNAKE_CASE__: str= is_training
        SCREAMING_SNAKE_CASE__: Optional[Any]= use_labels
        SCREAMING_SNAKE_CASE__: str= num_labels
        SCREAMING_SNAKE_CASE__: Optional[int]= initializer_range
    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        '''Create random pixel values, optional labels, and a config.'''
        SCREAMING_SNAKE_CASE__: int= floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE__: Optional[int]= None
        if self.use_labels:
            SCREAMING_SNAKE_CASE__: int= ids_tensor([self.batch_size] , self.num_labels )
        SCREAMING_SNAKE_CASE__: List[Any]= self.get_config()
        return config, pixel_values, labels
    def UpperCamelCase_ ( self ) -> Optional[int]:
        '''Build a LevitConfig from the stored hyper-parameters.'''
        return LevitConfig(
            image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> str:
        '''Run LevitModel and check the last hidden state shape.

        NOTE(review): the unpack of ``image_size[0], image_size[1]`` was
        garbled — ``height``/``width`` are read below but never bound here;
        compare with the upstream tester.
        '''
        SCREAMING_SNAKE_CASE__: List[str]= LevitModel(config=lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE__: int= model(lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[Any]= (self.image_size, self.image_size)
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Dict= image_size[0], image_size[1]
        # Each of the 4 conv patch-embedding layers halves the resolution.
        for _ in range(4 ):
            SCREAMING_SNAKE_CASE__: int= floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            SCREAMING_SNAKE_CASE__: Optional[int]= floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[str]:
        '''Run LevitForImageClassification and check the logits shape.'''
        SCREAMING_SNAKE_CASE__: str= self.num_labels
        SCREAMING_SNAKE_CASE__: int= LevitForImageClassification(lowerCAmelCase )
        model.to(lowerCAmelCase )
        model.eval()
        SCREAMING_SNAKE_CASE__: List[str]= model(lowerCAmelCase , labels=lowerCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def UpperCamelCase_ ( self ) -> Optional[Any]:
        '''Package config and inputs into the dict form the common tests use.'''
        SCREAMING_SNAKE_CASE__: List[str]= self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= config_and_inputs
        SCREAMING_SNAKE_CASE__: List[Any]= {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__a = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__a = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__a = False
__a = False
__a = False
__a = False
__a = False
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: int= LevitModelTester(self )
SCREAMING_SNAKE_CASE__: Any= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 )
def UpperCamelCase_ ( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase_ ( self ) -> Optional[Any]:
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def UpperCamelCase_ ( self ) -> Tuple:
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def UpperCamelCase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def UpperCamelCase_ ( self ) -> int:
pass
    def UpperCamelCase_ ( self ) -> int:
        """Check every model's forward signature starts with ``pixel_values``."""
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Dict= self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE__: Any= model_class(lowerCAmelCase )
            SCREAMING_SNAKE_CASE__: Tuple= inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE__: Union[str, Any]= [*signature.parameters.keys()]
            SCREAMING_SNAKE_CASE__: Union[str, Any]= ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , lowerCAmelCase )
    def UpperCamelCase_ ( self ) -> Union[str, Any]:
        """Verify hidden-state outputs: one state per stage plus the embedding
        output, with the first state shaped (seq_len, hidden_size) where
        seq_len is derived from four conv downsampling steps.
        """
        def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
            SCREAMING_SNAKE_CASE__: Optional[int]= model_class(lowerCAmelCase )
            model.to(lowerCAmelCase )
            model.eval()
            with torch.no_grad():
                SCREAMING_SNAKE_CASE__: Tuple= model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
            SCREAMING_SNAKE_CASE__: Optional[int]= outputs.hidden_states
            SCREAMING_SNAKE_CASE__: List[str]= len(self.model_tester.depths ) + 1
            self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
            SCREAMING_SNAKE_CASE__: Optional[int]= (self.model_tester.image_size, self.model_tester.image_size)
            SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: int= image_size[0], image_size[1]
            # Simulate the patch-embedding convolutions: 4 stride-2 convs shrink H and W.
            for _ in range(4 ):
                SCREAMING_SNAKE_CASE__: str= floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                SCREAMING_SNAKE_CASE__: Dict= floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE__: str= True
            check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            SCREAMING_SNAKE_CASE__: List[str]= True
            check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def UpperCamelCase_ ( self ) -> Any:
        pass
    def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ) -> Optional[Any]:
        """Extend the common input preparation: the distillation ("WithTeacher")
        head is inference-only, so labels must be dropped for it.
        """
        SCREAMING_SNAKE_CASE__: Tuple= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def UpperCamelCase_ ( self ) -> List[str]:
        """Smoke-test the bare LevitModel forward pass."""
        SCREAMING_SNAKE_CASE__: Union[str, Any]= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase )
    def UpperCamelCase_ ( self ) -> Optional[int]:
        """Smoke-test the image-classification head."""
        SCREAMING_SNAKE_CASE__: Optional[Any]= self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
    def UpperCamelCase_ ( self ) -> Optional[int]:
        """Check that a training step (forward + backward on the loss) runs
        for every trainable model class.
        """
        if not self.model_tester.is_training:
            return
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[str]= self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__: int= True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(lowerCAmelCase )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            SCREAMING_SNAKE_CASE__: Optional[int]= model_class(lowerCAmelCase )
            model.to(lowerCAmelCase )
            model.train()
            SCREAMING_SNAKE_CASE__: Optional[int]= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
            SCREAMING_SNAKE_CASE__: Union[str, Any]= model(**lowerCAmelCase ).loss
            loss.backward()
    def UpperCamelCase_ ( self ) -> str:
        """Same as the training test, but with gradient checkpointing enabled."""
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Tuple= self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        SCREAMING_SNAKE_CASE__: int= False
        SCREAMING_SNAKE_CASE__: Dict= True
        for model_class in self.all_model_classes:
            if model_class in get_values(lowerCAmelCase ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            SCREAMING_SNAKE_CASE__: List[str]= model_class(lowerCAmelCase )
            model.gradient_checkpointing_enable()
            model.to(lowerCAmelCase )
            model.train()
            SCREAMING_SNAKE_CASE__: Optional[int]= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
            SCREAMING_SNAKE_CASE__: int= model(**lowerCAmelCase ).loss
            loss.backward()
    def UpperCamelCase_ ( self ) -> int:
        """Exercise each sequence-classification ``problem_type`` (multi-label,
        single-label, regression) and assert no size-mismatch warning is
        raised during the loss computation.
        """
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Tuple= self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE__: Optional[Any]= [
            {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
            {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
            {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(lowerCAmelCase ),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}' ):
                    SCREAMING_SNAKE_CASE__: str= problem_type['''title''']
                    SCREAMING_SNAKE_CASE__: Optional[int]= problem_type['''num_labels''']
                    SCREAMING_SNAKE_CASE__: Optional[int]= model_class(lowerCAmelCase )
                    model.to(lowerCAmelCase )
                    model.train()
                    SCREAMING_SNAKE_CASE__: Union[str, Any]= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
                    if problem_type["num_labels"] > 1:
                        # Multi-label targets need one column per label.
                        SCREAMING_SNAKE_CASE__: str= inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
                    SCREAMING_SNAKE_CASE__: Optional[Any]= inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=lowerCAmelCase ) as warning_list:
                        SCREAMING_SNAKE_CASE__: Any= model(**lowerCAmelCase ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                f'Something is going wrong in the regression problem: intercepted {w.message}' )
                    loss.backward()
    @slow
    def UpperCamelCase_ ( self ) -> List[str]:
        """Slow test: load the first published Levit checkpoint from the Hub."""
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE__: str= LevitModel.from_pretrained(lowerCAmelCase )
            self.assertIsNotNone(lowerCAmelCase )
def A__ ( ):
    """Load the standard COCO cats fixture image used by the integration test.

    NOTE(review): the obfuscated source binds the opened image to a throwaway
    local but returns the undefined name ``image`` — calling this raises
    NameError; presumably the two names were the same originally.
    """
    SCREAMING_SNAKE_CASE__: Tuple= Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
    """Integration test: run a published Levit checkpoint on a fixture image
    and compare the first logits against hard-coded reference values.
    """
    @cached_property
    def UpperCamelCase_ ( self ) -> Dict:
        # Image processor matching the checkpoint under test.
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    @slow
    def UpperCamelCase_ ( self ) -> Any:
        SCREAMING_SNAKE_CASE__: str= LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: List[str]= self.default_image_processor
        SCREAMING_SNAKE_CASE__: Any= prepare_img()
        SCREAMING_SNAKE_CASE__: Optional[Any]= image_processor(images=lowerCAmelCase , return_tensors='''pt''' ).to(lowerCAmelCase )
        # forward pass
        with torch.no_grad():
            SCREAMING_SNAKE_CASE__: List[str]= model(**lowerCAmelCase )
        # verify the logits
        SCREAMING_SNAKE_CASE__: List[str]= torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , lowerCAmelCase )
        SCREAMING_SNAKE_CASE__: Any= torch.tensor([1.0448, -0.3745, -1.8317] ).to(lowerCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
| 64 |
import os
# Roman numeral symbol values, used for both parsing and generation.
_lowercase = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}


def UpperCamelCase(snake_case__):
    """Parse a Roman-numeral string and return its integer value.

    Uses the standard subtractive rule: a symbol smaller than its successor
    is subtracted (e.g. the ``I`` in ``IV``), otherwise added.

    :param snake_case__: Roman numeral string, e.g. ``"MCMXCIV"``.
    :return: the integer value; ``0`` for an empty string.
    :raises KeyError: if the string contains a non-Roman character.
    """
    # Fixed defect: the original body referenced the undefined names
    # ``SYMBOLS`` and ``numerals`` and raised NameError on every call.
    if not snake_case__:  # guard: avoid IndexError on the trailing symbol
        return 0
    total_value = 0
    index = 0
    while index < len(snake_case__) - 1:
        current_value = _lowercase[snake_case__[index]]
        next_value = _lowercase[snake_case__[index + 1]]
        if current_value < next_value:
            # Subtractive pair such as IV, IX, XL, CM.
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    # The final symbol is always added.
    total_value += _lowercase[snake_case__[index]]
    return total_value
def UpperCamelCase(snake_case__):
    """Render a non-negative integer as a minimal-form Roman numeral.

    :param snake_case__: integer to convert (0 yields the empty string).
    :return: the shortest Roman numeral string for the value.
    """
    # Fixed defect: the original body referenced the undefined names ``num``,
    # ``numerals``, ``m_count``, ``c_count`` and ``x_count`` (the parameter
    # and locals had been renamed away) and raised NameError on every call.
    numerals = ""
    num = snake_case__
    # Thousands: plain repetition of M.
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    # Hundreds: CM / CD subtractive forms, then D and repeated C.
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    # Tens: XC / XL subtractive forms, then L and repeated X.
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    # Units: IX / IV subtractive forms, then V and repeated I.
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def UpperCamelCase ( snake_case__ = "/p089_roman.txt"):
    """Project Euler 89: characters saved by rewriting each Roman numeral
    from the input file in its minimal form.

    NOTE(review): this obfuscated copy is broken — ``roman_numerals_filename``,
    ``parse_roman_numerals`` and ``generate_roman_numerals`` are undefined in
    this module (the two helpers above were both renamed to ``UpperCamelCase``,
    so they also shadow each other), and the file path was presumably
    ``os.path.dirname(__file__) + snake_case__`` originally.  Calling this
    function raises NameError; fixing it requires restoring distinct helper
    names at module level.
    """
    lowerCAmelCase_ : int = 0
    with open(os.path.dirname(snake_case__) + roman_numerals_filename) as filea:
        lowerCAmelCase_ : List[Any] = filea.readlines()
    for line in lines:
        lowerCAmelCase_ : Any = line.strip()
        lowerCAmelCase_ : Tuple = parse_roman_numerals(snake_case__)
        lowerCAmelCase_ : List[Any] = generate_roman_numerals(snake_case__)
        savings += len(snake_case__) - len(snake_case__)
    return savings
# NOTE(review): ``solution`` is also undefined here (the function above is
# named ``UpperCamelCase``), so this entry point raises NameError as written.
if __name__ == "__main__":
    print(f"{solution() = }")
| 659 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__UpperCAmelCase = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
    """Tests for the zero-shot-classification pipeline: argument handling,
    entailment-label resolution, and tiny/full model runs on PT and TF.

    NOTE(review): the class attributes below were all obfuscated to
    ``snake_case_`` and the guards reference the undefined names
    ``model_mapping`` / ``tf_model_mapping`` — presumably these were distinct
    ``model_mapping`` / ``tf_model_mapping`` attributes originally.
    """
    snake_case_ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    snake_case_ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        snake_case_ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        snake_case_ = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def __lowercase ( self : Tuple ,A : List[Any] ,A : Dict ,A : Optional[int] ):
        """Build a ZeroShotClassificationPipeline plus example inputs for the
        common pipeline test harness.
        """
        UpperCAmelCase__ : Dict = ZeroShotClassificationPipeline(
            model=A ,tokenizer=A ,candidate_labels=["""polics""", """health"""] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def __lowercase ( self : Optional[Any] ,A : List[str] ,A : Any ):
        """Exercise the pipeline's accepted input shapes (string vs list
        labels, hypothesis templates, batches) and its error cases.
        """
        UpperCAmelCase__ : int = classifier("""Who are you voting for in 2020?""" ,candidate_labels="""politics""" )
        self.assertEqual(A ,{"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
        # No kwarg
        UpperCAmelCase__ : str = classifier("""Who are you voting for in 2020?""" ,["""politics"""] )
        self.assertEqual(A ,{"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
        UpperCAmelCase__ : str = classifier("""Who are you voting for in 2020?""" ,candidate_labels=["""politics"""] )
        self.assertEqual(A ,{"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
        # Comma-separated string labels are split into two candidates.
        UpperCAmelCase__ : Optional[int] = classifier("""Who are you voting for in 2020?""" ,candidate_labels="""politics, public health""" )
        self.assertEqual(
            A ,{"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} )
        # Single-label mode: scores form a probability distribution.
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) ,1.0 )
        UpperCAmelCase__ : Optional[Any] = classifier("""Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health"""] )
        self.assertEqual(
            A ,{"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) ,1.0 )
        UpperCAmelCase__ : Optional[int] = classifier(
            """Who are you voting for in 2020?""" ,candidate_labels="""politics""" ,hypothesis_template="""This text is about {}""" )
        self.assertEqual(A ,{"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} )
        # https://github.com/huggingface/transformers/issues/13846
        UpperCAmelCase__ : Optional[int] = classifier(["""I am happy"""] ,["""positive""", """negative"""] )
        self.assertEqual(
            A ,[
                {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]}
                for i in range(1 )
            ] ,)
        UpperCAmelCase__ : Optional[int] = classifier(["""I am happy""", """I am sad"""] ,["""positive""", """negative"""] )
        self.assertEqual(
            A ,[
                {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]}
                for i in range(2 )
            ] ,)
        # Invalid inputs must raise: empty sequence/labels, non-string inputs,
        # and a hypothesis template without a "{}" placeholder.
        with self.assertRaises(A ):
            classifier("""""" ,candidate_labels="""politics""" )
        with self.assertRaises(A ):
            classifier(A ,candidate_labels="""politics""" )
        with self.assertRaises(A ):
            classifier("""Who are you voting for in 2020?""" ,candidate_labels="""""" )
        with self.assertRaises(A ):
            classifier("""Who are you voting for in 2020?""" ,candidate_labels=A )
        with self.assertRaises(A ):
            classifier(
                """Who are you voting for in 2020?""" ,candidate_labels="""politics""" ,hypothesis_template="""Not formatting template""" ,)
        with self.assertRaises(A ):
            classifier(
                """Who are you voting for in 2020?""" ,candidate_labels="""politics""" ,hypothesis_template=A ,)
        self.run_entailment_id(A )
    def __lowercase ( self : List[Any] ,A : Pipeline ):
        """Check ``entailment_id`` resolution for several label2id layouts.

        NOTE(review): the obfuscation collapsed the assignments that were
        presumably writes to ``zero_shot_classifier.model.config.labelaid``
        into unused locals, so the assertions below no longer observe the
        intended config mutations.
        """
        UpperCAmelCase__ : Optional[Any] = zero_shot_classifier.model.config
        UpperCAmelCase__ : Any = config.labelaid
        UpperCAmelCase__ : Optional[int] = zero_shot_classifier.entailment_id
        UpperCAmelCase__ : List[Any] = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id ,-1 )
        UpperCAmelCase__ : int = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
        self.assertEqual(zero_shot_classifier.entailment_id ,0 )
        UpperCAmelCase__ : int = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
        self.assertEqual(zero_shot_classifier.entailment_id ,0 )
        UpperCAmelCase__ : int = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
        self.assertEqual(zero_shot_classifier.entailment_id ,2 )
        UpperCAmelCase__ : Tuple = original_labelaid
        self.assertEqual(A ,zero_shot_classifier.entailment_id )
    @require_torch
    def __lowercase ( self : Optional[int] ):
        """Regression test: very long inputs must not crash truncation (PT)."""
        UpperCAmelCase__ : Any = pipeline(
            """zero-shot-classification""" ,model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" ,framework="""pt""" ,)
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            """Who are you voting for in 2020?""" * 100 ,candidate_labels=["""politics""", """public health""", """science"""] )
    @require_torch
    def __lowercase ( self : Union[str, Any] ):
        """Tiny PT model: untrained weights yield a uniform score distribution."""
        UpperCAmelCase__ : Dict = pipeline(
            """zero-shot-classification""" ,model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" ,framework="""pt""" ,)
        UpperCAmelCase__ : List[Any] = zero_shot_classifier(
            """Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(A ) ,{
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""science""", """public health""", """politics"""],
                """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
            } ,)
    @require_tf
    def __lowercase ( self : Any ):
        """Tiny TF model: same uniform-score expectation as the PT variant."""
        UpperCAmelCase__ : Tuple = pipeline(
            """zero-shot-classification""" ,model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" ,framework="""tf""" ,)
        UpperCAmelCase__ : Tuple = zero_shot_classifier(
            """Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(A ) ,{
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""science""", """public health""", """politics"""],
                """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
            } ,)
    @slow
    @require_torch
    def __lowercase ( self : int ):
        """Full roberta-large-mnli model on PT: single-label and multi-label runs
        checked against reference scores.
        """
        UpperCAmelCase__ : Any = pipeline("""zero-shot-classification""" ,model="""roberta-large-mnli""" ,framework="""pt""" )
        UpperCAmelCase__ : Tuple = zero_shot_classifier(
            """Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(A ) ,{
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""politics""", """public health""", """science"""],
                """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
            } ,)
        UpperCAmelCase__ : str = zero_shot_classifier(
            """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
            """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
            """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
            """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
            """ machine translation tasks show these models to be superior in quality while being more parallelizable"""
            """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
            """ English-to-German translation task, improving over the existing best results, including ensembles by"""
            """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
            """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
            """ fraction of the training costs of the best models from the literature. We show that the Transformer"""
            """ generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
            """ large and limited training data.""" ,candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] ,multi_label=A ,)
        self.assertEqual(
            nested_simplify(A ) ,{
                """sequence""": (
                    """The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
                    """ networks in an encoder-decoder configuration. The best performing models also connect the"""
                    """ encoder and decoder through an attention mechanism. We propose a new simple network"""
                    """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
                    """ and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
                    """ superior in quality while being more parallelizable and requiring significantly less time to"""
                    """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
                    """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
                    """ English-to-French translation task, our model establishes a new single-model state-of-the-art"""
                    """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
                    """ costs of the best models from the literature. We show that the Transformer generalizes well to"""
                    """ other tasks by applying it successfully to English constituency parsing both with large and"""
                    """ limited training data."""
                ),
                """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
                """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
            } ,)
    @slow
    @require_tf
    def __lowercase ( self : Dict ):
        """Full roberta-large-mnli model on TF: mirrors the PT slow test."""
        UpperCAmelCase__ : str = pipeline("""zero-shot-classification""" ,model="""roberta-large-mnli""" ,framework="""tf""" )
        UpperCAmelCase__ : Dict = zero_shot_classifier(
            """Who are you voting for in 2020?""" ,candidate_labels=["""politics""", """public health""", """science"""] )
        self.assertEqual(
            nested_simplify(A ) ,{
                """sequence""": """Who are you voting for in 2020?""",
                """labels""": ["""politics""", """public health""", """science"""],
                """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
            } ,)
        UpperCAmelCase__ : Union[str, Any] = zero_shot_classifier(
            """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
            """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
            """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
            """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
            """ machine translation tasks show these models to be superior in quality while being more parallelizable"""
            """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
            """ English-to-German translation task, improving over the existing best results, including ensembles by"""
            """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
            """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
            """ fraction of the training costs of the best models from the literature. We show that the Transformer"""
            """ generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
            """ large and limited training data.""" ,candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] ,multi_label=A ,)
        self.assertEqual(
            nested_simplify(A ) ,{
                """sequence""": (
                    """The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
                    """ networks in an encoder-decoder configuration. The best performing models also connect the"""
                    """ encoder and decoder through an attention mechanism. We propose a new simple network"""
                    """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
                    """ and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
                    """ superior in quality while being more parallelizable and requiring significantly less time to"""
                    """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
                    """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
                    """ English-to-French translation task, our model establishes a new single-model state-of-the-art"""
                    """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
                    """ costs of the best models from the literature. We show that the Transformer generalizes well to"""
                    """ other tasks by applying it successfully to English constituency parsing both with large and"""
                    """ limited training data."""
                ),
                """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
                """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
            } ,)
| 65 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCamelCase ( ):
    """Entry point for the TensorFlow benchmark CLI: parse args, translate the
    deprecated ``--no_*`` flags into an actionable error message, and run.

    NOTE(review): this obfuscated copy references the undefined name
    ``snake_case__`` (the function takes no parameters) and collapses several
    distinct locals (``args``, ``arg_error_msg``, ``begin_error_msg``,
    ``depreciated_args``...) into reused names, so it raises NameError as
    written; the structure below mirrors transformers'
    ``run_benchmark_tf.py`` main().
    """
    lowerCAmelCase_ : Dict = HfArgumentParser(snake_case__)
    lowerCAmelCase_ : Dict = parser.parse_args_into_dataclasses()[0]
    lowerCAmelCase_ : List[Any] = TensorFlowBenchmark(args=snake_case__)
    try:
        lowerCAmelCase_ : str = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        # Rewrite the deprecated-argument error into a user-actionable message.
        lowerCAmelCase_ : Optional[Any] = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        lowerCAmelCase_ : Tuple = " ".join(str(snake_case__).split(" ")[:-1])
        lowerCAmelCase_ : List[Any] = ""
        lowerCAmelCase_ : Optional[Any] = eval(str(snake_case__).split(" ")[-1])
        lowerCAmelCase_ : List[Any] = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(snake_case__)
        if len(snake_case__) > 0:
            lowerCAmelCase_ : int = full_error_msg + begin_error_msg + str(snake_case__)
            raise ValueError(snake_case__)
    benchmark.run()
if __name__ == "__main__":
    main()
| 659 | 0 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class lowerCAmelCase_ ( unittest.TestCase ):
    """Model tester that builds tiny BertConfig instances and random inputs
    for the Flax BERT test suite below.
    """
    def __init__( self , _lowerCAmelCase , _lowerCAmelCase=1_3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=4 , ):
        # NOTE(review): all parameters were obfuscated to ``_lowerCAmelCase``;
        # only the attribute names below reveal the intended keyword for each
        # positional default (batch_size=13, seq_length=7, vocab_size=99, ...).
        _lowercase : Optional[Any] = parent
        _lowercase : Any = batch_size
        _lowercase : str = seq_length
        _lowercase : Union[str, Any] = is_training
        _lowercase : Tuple = use_attention_mask
        _lowercase : List[str] = use_token_type_ids
        _lowercase : Tuple = use_labels
        _lowercase : Tuple = vocab_size
        _lowercase : List[Any] = hidden_size
        _lowercase : List[Any] = num_hidden_layers
        _lowercase : List[Any] = num_attention_heads
        _lowercase : Any = intermediate_size
        _lowercase : List[str] = hidden_act
        _lowercase : Dict = hidden_dropout_prob
        _lowercase : int = attention_probs_dropout_prob
        _lowercase : str = max_position_embeddings
        _lowercase : Tuple = type_vocab_size
        _lowercase : Optional[int] = type_sequence_label_size
        _lowercase : Optional[int] = initializer_range
        _lowercase : List[Any] = num_choices
    def __a ( self ):
        """Build (config, input_ids, token_type_ids, attention_mask) with random ids."""
        _lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _lowercase : str = None
        if self.use_attention_mask:
            _lowercase : int = random_attention_mask([self.batch_size, self.seq_length] )
        _lowercase : Optional[Any] = None
        if self.use_token_type_ids:
            _lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _lowercase : Any = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def __a ( self ):
        """Return (config, inputs_dict) for the common Flax model tests.

        NOTE(review): ``config_and_inputs`` / ``input_ids`` etc. are undefined
        here — the obfuscation renamed the locals that should hold the tuple
        from ``prepare_config_and_inputs()``.
        """
        _lowercase : List[Any] = self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase , _lowercase : Dict = config_and_inputs
        _lowercase : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def __a ( self ):
        """Decoder variant: also returns encoder hidden states and mask."""
        _lowercase : Tuple = self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = config_and_inputs
        _lowercase : Any = True
        _lowercase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        _lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
    """Common-suite test case for the Flax BERT model family.

    NOTE(review): this class shadows the tester class above (both were
    obfuscated to ``lowerCAmelCase_``), and ``FlaxBertModelTester`` referenced
    in setUp no longer exists under that name in this module.
    """
    _UpperCamelCase : Optional[Any] = True
    _UpperCamelCase : int = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def __a ( self ):
        # setUp: presumably an assignment to ``self.model_tester`` originally.
        _lowercase : Optional[Any] = FlaxBertModelTester(self )
    @slow
    def __a ( self ):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        _lowercase : Union[str, Any] = FlaxBertModel.from_pretrained('bert-base-cased' )
        _lowercase : int = model(np.ones((1, 1) ) )
        self.assertIsNotNone(_lowerCAmelCase )
| 66 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
# Candidate summaries (predictions) used as ROUGE inputs by the tests below.
_lowercase = [
    '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
    ''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
    ''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
    '''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
    ''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
    ''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
    ''' body.''',
    '''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
    ''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
    ''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
    ''' punishment.''',
]
# Reference summaries (targets) paired one-to-one with the predictions above.
# NOTE(review): both lists were obfuscated to the same name ``_lowercase``,
# so only this second list survives at runtime.
_lowercase = [
    '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
    ''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
    ''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
    '''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
    ''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
    ''' Israelis .''',
    '''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
    ''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
    ''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
    ''' sentences up by 28% .''',
]
def UpperCamelCase ( ):
lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , bootstrap_aggregation=snake_case__ , rouge_keys=["rouge2", "rougeL"])
assert isinstance(snake_case__ , snake_case__)
lowerCAmelCase_ : str = calculate_rouge(snake_case__ , snake_case__ , bootstrap_aggregation=snake_case__ , rouge_keys=["rouge2"])
assert (
pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["rouge2"]).fmeasure.mean()
)
def UpperCamelCase ( ):
lowerCAmelCase_ : str = "rougeLsum"
lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=[k])[k]
lowerCAmelCase_ : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=[k])[k]
assert score > score_no_sep
def UpperCamelCase ( ):
lowerCAmelCase_ : int = ["rouge1", "rouge2", "rougeL"]
lowerCAmelCase_ : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=snake_case__)
lowerCAmelCase_ : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=snake_case__)
assert score_sep == score_no_sep
def UpperCamelCase ( ):
    # Test: when each prediction/target is a single sentence (no internal
    # newlines), `newline_sep` should have no effect on the ROUGE scores.
    # NOTE(review): mangled call arguments (`snake_case__`) — presumably the
    # local prediction/target lists below plus True/False for `newline_sep`;
    # confirm against the original test module.
    lowerCAmelCase_ : List[str] = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    lowerCAmelCase_ : Dict = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__) == calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__)
def UpperCamelCase ( ):
    # Test: pegasus-style summaries that use "<n>" instead of real newlines
    # should get a higher rougeLsum when newline separation is disabled.
    # NOTE(review): mangled names — both scores are assigned to the throwaway
    # `lowerCAmelCase_`, so `new_score` / `prev_score` in the assert are
    # unbound as written.
    lowerCAmelCase_ : Optional[int] = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    lowerCAmelCase_ : Any = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , rouge_keys=["rougeLsum"] , newline_sep=snake_case__)["rougeLsum"]
    lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def UpperCamelCase ( ):
    # Test: calculate_rouge_path on files returns a dict-like result when
    # bootstrap aggregation is on, and a different container when it is off.
    # NOTE(review): mangled arguments — the isinstance checks read
    # `isinstance(snake_case__, snake_case__)`, which lost both the value and
    # the expected type; confirm against the original test module.
    lowerCAmelCase_ : int = Path("examples/seq2seq/test_data/wmt_en_ro")
    lowerCAmelCase_ : Dict = calculate_rouge_path(data_dir.joinpath("test.source") , data_dir.joinpath("test.target"))
    assert isinstance(snake_case__ , snake_case__)
    lowerCAmelCase_ : Any = calculate_rouge_path(
        data_dir.joinpath("test.source") , data_dir.joinpath("test.target") , bootstrap_aggregation=snake_case__)
    assert isinstance(snake_case__ , snake_case__)
| 659 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class A_ ( unittest.TestCase ):
    """Unit tests for CLIPSegProcessor: save/load round-trips, image and text
    feature parity with the underlying tokenizer/image processor, visual
    prompts, and batch decoding.

    NOTE(review): identifiers in this file were machine-mangled. Locals are all
    `_lowercase` (each assignment shadows the previous one) and several method
    parameters read `__A`; attributes such as `self.tmpdirname`,
    `self.vocab_file`, `self.merges_file` and `self.image_processor_file` are
    read but never assigned in this mangled form. Restore the original names
    before running.
    """

    def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
        # setUp: build a temp dir with a tiny CLIP vocab/merges pair and an
        # image-processor config file.
        _lowercase = tempfile.mkdtemp()
        # fmt: off
        _lowercase = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        _lowercase = dict(zip(__A ,range(len(__A ) ) ) )
        _lowercase = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        _lowercase = {'unk_token': '<unk>'}
        _lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
        _lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write(json.dumps(__A ) + '\n' )
        with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
            fp.write('\n'.join(__A ) )
        _lowercase = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        _lowercase = os.path.join(self.tmpdirname ,__A )
        with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp:
            json.dump(__A ,__A )

    def __UpperCAmelCase ( self : int ,**__A : Optional[Any] ) -> Dict:
        # Helper: slow tokenizer loaded from the temp dir.
        return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__A )

    def __UpperCAmelCase ( self : Optional[int] ,**__A : List[str] ) -> Dict:
        # Helper: fast (Rust) tokenizer loaded from the temp dir.
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__A )

    def __UpperCAmelCase ( self : Dict ,**__A : int ) -> List[Any]:
        # Helper: image processor loaded from the temp dir.
        return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__A )

    def __UpperCAmelCase ( self : int ) -> str:
        # tearDown: remove the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )

    def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        # Helper: one random 30x400 RGB image as a PIL Image.
        _lowercase = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
        _lowercase = [Image.fromarray(np.moveaxis(__A ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs

    def __UpperCAmelCase ( self : Tuple ) -> str:
        # Test: save_pretrained / from_pretrained round-trips preserve both the
        # tokenizer vocab and the image-processor config for slow and fast
        # tokenizer variants.
        _lowercase = self.get_tokenizer()
        _lowercase = self.get_rust_tokenizer()
        _lowercase = self.get_image_processor()
        _lowercase = CLIPSegProcessor(tokenizer=__A ,image_processor=__A )
        processor_slow.save_pretrained(self.tmpdirname )
        _lowercase = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__A )
        _lowercase = CLIPSegProcessor(tokenizer=__A ,image_processor=__A )
        processor_fast.save_pretrained(self.tmpdirname )
        _lowercase = CLIPSegProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer ,__A )
        self.assertIsInstance(processor_fast.tokenizer ,__A )
        self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor ,__A )
        self.assertIsInstance(processor_fast.image_processor ,__A )

    def __UpperCAmelCase ( self : Any ) -> List[Any]:
        # Test: extra kwargs passed to from_pretrained (special tokens,
        # image-processor options) are honored after a save/load round-trip.
        _lowercase = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        _lowercase = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
        _lowercase = self.get_image_processor(do_normalize=__A ,padding_value=1.0 )
        _lowercase = CLIPSegProcessor.from_pretrained(
            self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__A ,padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,__A )
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor ,__A )

    def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
        # Test: processor(images=...) matches the bare image processor output.
        _lowercase = self.get_image_processor()
        _lowercase = self.get_tokenizer()
        _lowercase = CLIPSegProcessor(tokenizer=__A ,image_processor=__A )
        _lowercase = self.prepare_image_inputs()
        _lowercase = image_processor(__A ,return_tensors='np' )
        _lowercase = processor(images=__A ,return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )

    def __UpperCAmelCase ( self : str ) -> Union[str, Any]:
        # Test: processor(text=...) matches the bare tokenizer output.
        _lowercase = self.get_image_processor()
        _lowercase = self.get_tokenizer()
        _lowercase = CLIPSegProcessor(tokenizer=__A ,image_processor=__A )
        _lowercase = 'lower newer'
        _lowercase = processor(text=__A )
        _lowercase = tokenizer(__A )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )

    def __UpperCAmelCase ( self : str ) -> Dict:
        # Test: text+image call yields the expected keys; empty call raises.
        _lowercase = self.get_image_processor()
        _lowercase = self.get_tokenizer()
        _lowercase = CLIPSegProcessor(tokenizer=__A ,image_processor=__A )
        _lowercase = 'lower newer'
        _lowercase = self.prepare_image_inputs()
        _lowercase = processor(text=__A ,images=__A )
        self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(__A ):
            processor()

    def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
        # Test: image + visual_prompt call yields pixel_values and
        # conditional_pixel_values; empty call raises.
        _lowercase = self.get_image_processor()
        _lowercase = self.get_tokenizer()
        _lowercase = CLIPSegProcessor(tokenizer=__A ,image_processor=__A )
        _lowercase = self.prepare_image_inputs()
        _lowercase = self.prepare_image_inputs()
        _lowercase = processor(images=__A ,visual_prompt=__A )
        self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] )
        # test if it raises when no input is passed
        with pytest.raises(__A ):
            processor()

    def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
        # Test: processor.batch_decode delegates to tokenizer.batch_decode.
        _lowercase = self.get_image_processor()
        _lowercase = self.get_tokenizer()
        _lowercase = CLIPSegProcessor(tokenizer=__A ,image_processor=__A )
        _lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        _lowercase = processor.batch_decode(__A )
        _lowercase = tokenizer.batch_decode(__A )
        self.assertListEqual(__A ,__A )
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( snake_case__ , unittest.TestCase ):
    """Unit tests for LEDTokenizer / LEDTokenizerFast: batch encoding shapes,
    special tokens, truncation, target tokenization and global attention mask
    padding.

    NOTE(review): identifiers in this file were machine-mangled. The first
    base class reads `snake_case__` (unbound here; the mixin imported above as
    TokenizerTesterMixin is the likely original) and locals are all
    `lowerCAmelCase_`, so names such as `batch`, `targets`, `inputs`,
    `labels`, `encoded_output` and `outputs` read below are never bound.
    Restore the original names before running.
    """

    UpperCamelCase_ = LEDTokenizer
    UpperCamelCase_ = LEDTokenizerFast
    UpperCamelCase_ = True

    def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
        '''setUp: write a tiny BPE vocab/merges pair into the temp dir.'''
        super().setUp()
        lowerCAmelCase_ : Union[str, Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        lowerCAmelCase_ : Tuple = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
        lowerCAmelCase_ : int = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowerCAmelCase_ : Union[str, Any] = {"unk_token": "<unk>"}
        lowerCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
        lowerCAmelCase_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
        with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
            fp.write("\n".join(lowerCAmelCase__ ) )

    def UpperCAmelCase_ ( self : List[Any] ,**lowerCAmelCase__ : int ) -> Tuple:
        '''Helper: slow tokenizer loaded from the temp dir.'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )

    def UpperCAmelCase_ ( self : Union[str, Any] ,**lowerCAmelCase__ : Optional[int] ) -> List[Any]:
        '''Helper: fast (Rust) tokenizer loaded from the temp dir.'''
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )

    def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : int ) -> List[str]:
        '''Helper: fixed (input, output) text pair for the tester mixin.'''
        return "lower newer", "lower newer"

    @cached_property
    def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
        '''Pretrained slow tokenizer used by the integration-style tests.'''
        return LEDTokenizer.from_pretrained("allenai/led-base-16384" )

    @cached_property
    def UpperCAmelCase_ ( self : List[str] ) -> Dict:
        '''Pretrained fast tokenizer used by the integration-style tests.'''
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )

    @require_torch
    def UpperCAmelCase_ ( self : int ) -> Optional[int]:
        '''Test: padded batch has the expected shape and exact token ids.'''
        lowerCAmelCase_ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        lowerCAmelCase_ : int = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : Any = tokenizer(lowerCAmelCase__ ,max_length=len(lowerCAmelCase__ ) ,padding=lowerCAmelCase__ ,return_tensors="pt" )
            self.assertIsInstance(lowerCAmelCase__ ,lowerCAmelCase__ )
            self.assertEqual((2, 9) ,batch.input_ids.shape )
            self.assertEqual((2, 9) ,batch.attention_mask.shape )
            lowerCAmelCase_ : int = batch.input_ids.tolist()[0]
            self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )

    @require_torch
    def UpperCAmelCase_ ( self : Dict ) -> Any:
        '''Test: plain call returns input_ids/attention_mask but no labels.'''
        lowerCAmelCase_ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,return_tensors="pt" )
            self.assertIn("input_ids" ,lowerCAmelCase__ )
            self.assertIn("attention_mask" ,lowerCAmelCase__ )
            self.assertNotIn("labels" ,lowerCAmelCase__ )
            self.assertNotIn("decoder_attention_mask" ,lowerCAmelCase__ )

    @require_torch
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
        '''Test: text_target encoding honors max_length padding.'''
        lowerCAmelCase_ : int = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : Optional[int] = tokenizer(text_target=lowerCAmelCase__ ,max_length=32 ,padding="max_length" ,return_tensors="pt" )
            self.assertEqual(32 ,targets["input_ids"].shape[1] )

    @require_torch
    def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
        '''Test: very long input is truncated to the model max length (5122).'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : Tuple = tokenizer(
                ["I am a small frog" * 10_24, "I am a small frog"] ,padding=lowerCAmelCase__ ,truncation=lowerCAmelCase__ ,return_tensors="pt" )
            self.assertIsInstance(lowerCAmelCase__ ,lowerCAmelCase__ )
            self.assertEqual(batch.input_ids.shape ,(2, 51_22) )

    @require_torch
    def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
        '''Test: inputs and targets both start with BOS and end with EOS.'''
        lowerCAmelCase_ : Tuple = ["A long paragraph for summarization."]
        lowerCAmelCase_ : Dict = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ ,return_tensors="pt" )
            lowerCAmelCase_ : Optional[Any] = tokenizer(text_target=lowerCAmelCase__ ,return_tensors="pt" )
            lowerCAmelCase_ : List[str] = inputs["input_ids"]
            lowerCAmelCase_ : Any = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    @require_torch
    def UpperCAmelCase_ ( self : str ) -> Tuple:
        '''Test: tokenizer.pad preserves a caller-supplied global_attention_mask.'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : str = ["Summary of the text.", "Another summary."]
            lowerCAmelCase_ : str = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            lowerCAmelCase_ : List[Any] = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ )
            lowerCAmelCase_ : Optional[int] = [[0] * len(lowerCAmelCase__ ) for x in encoded_output["input_ids"]]
            lowerCAmelCase_ : Optional[int] = tokenizer.pad(lowerCAmelCase__ )
            self.assertSequenceEqual(outputs["global_attention_mask"] ,lowerCAmelCase__ )

    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
        '''Intentionally skipped mixin test (overridden with a no-op).'''
        pass

    def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
        '''Test: slow and fast tokenizers agree on embedded special tokens.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCAmelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
                lowerCAmelCase_ : Tuple = self.tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
                lowerCAmelCase_ : Dict = "A, <mask> AllenNLP sentence."
                lowerCAmelCase_ : Tuple = tokenizer_r.encode_plus(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,return_token_type_ids=lowerCAmelCase__ )
                lowerCAmelCase_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,return_token_type_ids=lowerCAmelCase__ )
                self.assertEqual(sum(tokens_r["token_type_ids"] ) ,sum(tokens_p["token_type_ids"] ) )
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) ,sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) ,)
                lowerCAmelCase_ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                lowerCAmelCase_ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                self.assertSequenceEqual(tokens_p["input_ids"] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(
                    lowerCAmelCase__ ,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    lowerCAmelCase__ ,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 659 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the LLaMA model package: maps each submodule to
# the public names it exports.  Optional-dependency submodules are only
# registered when their backend (sentencepiece / tokenizers / torch) is
# importable.
#
# Bug fix: the previous version assigned every structure to the same name
# (`__A`), clobbering the dict with plain lists, and then passed the unbound
# name `_import_structure` to _LazyModule (NameError at import time).
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports each
    # attribute on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 68 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module-level logger for this configuration file.
_lowercase = logging.get_logger(__name__)

# Map of canonical VAN checkpoint name -> hosted config.json URL.
# NOTE(review): names in this file were machine-mangled — both assignments
# target `_lowercase`, so the logger is immediately overwritten by this map;
# in the original these were two distinct module-level names.
_lowercase = {
    '''Visual-Attention-Network/van-base''': (
        '''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
    ),
}
class __snake_case ( PretrainedConfig ):
    """Configuration for the (deprecated) Visual Attention Network (VAN) model.

    Stores image/patch geometry, per-stage widths and depths, and
    regularization hyper-parameters.

    Bug fix: the mangled original declared every ``__init__`` parameter as
    ``lowerCAmelCase__`` (a SyntaxError: duplicate argument), assigned each
    value to a throwaway local instead of an instance attribute, and
    inherited from the unbound name ``snake_case__``.  Parameter names are
    restored from the attribute reads in the body, and the base class from
    the ``PretrainedConfig`` import at the top of this file.
    """

    # Model-type key used by the auto classes.
    UpperCamelCase_ = 'van'

    def __init__(
        self,
        image_size=2_24,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 1_28, 3_20, 5_12],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 659 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
# Lazy-import structure for the GPT-NeoX Japanese package: maps each submodule
# to the public names it exports; the modeling submodule is only registered
# when torch is importable.
#
# Bug fix: the previous version assigned both the dict and the model list to
# the same name (`a`), clobbering the structure, and then passed the unbound
# name `_import_structure` to _LazyModule (NameError at import time).
_import_structure = {
    '''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
    '''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_gpt_neox_japanese'''] = [
        '''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GPTNeoXJapaneseForCausalLM''',
        '''GPTNeoXJapaneseLayer''',
        '''GPTNeoXJapaneseModel''',
        '''GPTNeoXJapanesePreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports each
    # attribute on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 69 |
from math import factorial
def UpperCamelCase(n, k):
    """Return n choose k: the number of k-element combinations of n items.

    Bug fix: the mangled original declared both parameters as ``snake_case__``
    (a SyntaxError: duplicate argument) while the body already read ``n`` and
    ``k``; the parameter names are restored to match the body.

    Raises:
        ValueError: if ``n < k`` or ``k < 0``.
    """
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'''If a class of 40 students must be arranged into groups of''',
f"4 for group projects, there are {combinations(40, 4)} ways",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f"are {combinations(10, 3)} ways that first, second and",
'''third place can be awarded.''',
)
| 659 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class A( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
    '''Fast (CPU, tiny-model) tests for StableUnCLIPImgaImgPipeline.

    NOTE(review): identifiers in this file were machine-mangled. The three
    `UpperCamelCase` base classes are unbound here (the mixins imported above
    — PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin,
    PipelineTesterMixin — are the likely originals), and locals are all
    `lowerCamelCase_`, so names such as `embedder_hidden_size`,
    `embedder_projection_dim`, `feature_extractor`, `image_encoder`, etc. read
    below are never bound.  Restore before running.
    '''

    UpperCamelCase = StableUnCLIPImgaImgPipeline
    UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    UpperCamelCase = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    UpperCamelCase = frozenset([] )

    def a__ ( self : List[Any] ) -> List[str]:
        '''Build the tiny pipeline components dict used by the fast tests.'''
        lowerCamelCase_ = 32
        lowerCamelCase_ = embedder_hidden_size
        # image encoding components
        lowerCamelCase_ = CLIPImageProcessor(crop_size=32 , size=32 )
        torch.manual_seed(0 )
        lowerCamelCase_ = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=A_ , projection_dim=A_ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
        # regular denoising components
        torch.manual_seed(0 )
        lowerCamelCase_ = StableUnCLIPImageNormalizer(embedding_dim=A_ )
        lowerCamelCase_ = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
        torch.manual_seed(0 )
        lowerCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        torch.manual_seed(0 )
        lowerCamelCase_ = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=A_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        lowerCamelCase_ = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=A_ , layers_per_block=1 , upcast_attention=A_ , use_linear_projection=A_ , )
        torch.manual_seed(0 )
        lowerCamelCase_ = DDIMScheduler(
            beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=A_ , steps_offset=1 , )
        torch.manual_seed(0 )
        lowerCamelCase_ = AutoencoderKL()
        lowerCamelCase_ = {
            # image encoding components
            'feature_extractor': feature_extractor,
            'image_encoder': image_encoder.eval(),
            # image noising components
            'image_normalizer': image_normalizer.eval(),
            'image_noising_scheduler': image_noising_scheduler,
            # regular denoising components
            'tokenizer': tokenizer,
            'text_encoder': text_encoder.eval(),
            'unet': unet.eval(),
            'scheduler': scheduler,
            'vae': vae.eval(),
        }
        return components

    def a__ ( self : List[Any] , A_ : Any , A_ : List[str]=0 , A_ : int=True ) -> List[Any]:
        '''Build deterministic dummy inputs (seeded generator + random image).'''
        if str(A_ ).startswith('mps' ):
            lowerCamelCase_ = torch.manual_seed(A_ )
        else:
            lowerCamelCase_ = torch.Generator(device=A_ ).manual_seed(A_ )
        lowerCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
        if pil_image:
            # Rescale from [-1, 1] to [0, 1] and convert to PIL.
            lowerCamelCase_ = input_image * 0.5 + 0.5
            lowerCamelCase_ = input_image.clamp(0 , 1 )
            lowerCamelCase_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            lowerCamelCase_ = DiffusionPipeline.numpy_to_pil(A_ )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def a__ ( self : Optional[Any] ) -> Optional[Any]:
        '''Test: CPU forward pass reproduces a known 3x3 output slice.'''
        lowerCamelCase_ = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase_ = self.get_dummy_components()
        lowerCamelCase_ = StableUnCLIPImgaImgPipeline(**A_ )
        lowerCamelCase_ = sd_pipe.to(A_ )
        sd_pipe.set_progress_bar_config(disable=A_ )
        lowerCamelCase_ = self.get_dummy_inputs(A_ )
        inputs.update({'image_embeds': None} )
        lowerCamelCase_ = sd_pipe(**A_ ).images
        lowerCamelCase_ = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCamelCase_ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def a__ ( self : Union[str, Any] ) -> int:
        '''Test: attention slicing does not change the output (strict on CPU/MPS).'''
        lowerCamelCase_ = torch_device in ['cpu', 'mps']
        self._test_attention_slicing_forward_pass(test_max_difference=A_ )

    def a__ ( self : Optional[Any] ) -> int:
        '''Test: batched and single inference give identical results.'''
        lowerCamelCase_ = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=A_ )

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def a__ ( self : List[Any] ) -> Optional[int]:
        '''Test: xformers attention path matches the default attention path.'''
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=A_ )
@slow
@require_torch_gpu
class A( unittest.TestCase ):
    '''Slow GPU integration tests for StableUnCLIPImgaImgPipeline against
    reference outputs hosted on the Hub.

    NOTE(review): identifiers were machine-mangled — locals are all
    `lowerCamelCase_` and most call arguments read `A_` (the class name), so
    the expected/actual images and flags referenced below are not bound as
    written; restore from the original test module before running.
    '''

    def a__ ( self : Optional[Any] ) -> List[str]:
        # tearDown: free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a__ ( self : Any ) -> Any:
        # Test: l-variant img2img output matches the stored reference image.
        lowerCamelCase_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
        lowerCamelCase_ = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
        lowerCamelCase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa )
        pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
        lowerCamelCase_ = pipe(A_ , 'anime turle' , generator=A_ , output_type='np' )
        lowerCamelCase_ = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(A_ , A_ )

    def a__ ( self : Optional[Any] ) -> Tuple:
        # Test: h-variant img2img output matches the stored reference image.
        lowerCamelCase_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
        lowerCamelCase_ = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
        lowerCamelCase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
        pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase_ = torch.Generator(device='cpu' ).manual_seed(0 )
        lowerCamelCase_ = pipe(A_ , 'anime turle' , generator=A_ , output_type='np' )
        lowerCamelCase_ = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(A_ , A_ )

    def a__ ( self : int ) -> Tuple:
        # Test: with attention slicing + sequential CPU offload, peak GPU
        # memory for a 2-step run stays under 7 GB.
        lowerCamelCase_ = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowerCamelCase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
            'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
        lowerCamelCase_ = pipe.to(A_ )
        pipe.set_progress_bar_config(disable=A_ )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowerCamelCase_ = pipe(
            A_ , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
        lowerCamelCase_ = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 70 |
import argparse
import json
from tqdm import tqdm
def UpperCamelCase():
    """Convert raw DPR biencoder training data into two parallel files:
    an evaluation set (one question per line) and a gold-data file
    (tab-separated positive-context titles per line).

    Bug fix: the mangled original bound every value to the throwaway
    `lowerCAmelCase_` and then read the unbound names `parser`, `args` and
    `snake_case__` (also passed as `type=` to add_argument); the real local
    names are restored from the surrounding reads.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            # Titles of all positive contexts become the gold line.
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
main()
| 659 | 0 |
'''simple docstring'''
from maths.prime_check import is_prime
def a__(number: int) -> int:
    """Return ``number + 2`` if (number, number + 2) is a twin-prime pair,
    otherwise -1.

    Bug fixes: the mangled original named the parameter
    ``_SCREAMING_SNAKE_CASE`` while the body read the unbound name ``number``,
    and its type check was ``isinstance(number, number)``, which raises
    ``TypeError: isinstance() arg 2 must be a type`` for every input instead
    of validating it.

    Raises:
        TypeError: if *number* is not an int.
    """
    if not isinstance(number, int):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
    # Run any doctests defined in this module when executed directly.
    import doctest
    doctest.testmod()
| 71 |
from collections.abc import Sequence
def UpperCamelCase ( snake_case__ = None):
    """Return the maximum sum over all (not necessarily contiguous) subsequences of the
    input numbers.

    Raises:
        ValueError: if the sequence is ``None`` or empty.

    Fixes: the renamed version referenced an undefined ``nums`` and passed the whole
    input list into ``max()`` alongside ints (TypeError on any multi-element input).
    """
    nums = snake_case__
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1 , len(nums)):
        num = nums[i]
        # Either keep the best sum so far, extend it with this number, or restart here.
        ans = max(ans , ans + num , num)
    return ans
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Try on a sample input from the user
    # Fixes: both inputs were assigned to the same name, ``n``/``array`` were undefined,
    # and ``max_subsequence_sum`` had been renamed to ``UpperCamelCase``.
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(UpperCamelCase(array))
| 659 | 0 |
'''simple docstring'''
from __future__ import annotations
import bisect
def UpperCamelCase ( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    """Return the leftmost index at which ``item`` can be inserted into
    ``sorted_collection[lo:hi]`` while keeping it sorted.

    A negative ``hi`` is a sentinel meaning "up to the end of the list".

    Fix: every parameter shared the name ``lowercase_`` (a SyntaxError); names are
    restored from how the body uses them.
    """
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def UpperCamelCase ( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> int:
    """Return the rightmost index at which ``item`` can be inserted into
    ``sorted_collection[lo:hi]`` while keeping it sorted (after any equal items).

    A negative ``hi`` is a sentinel meaning "up to the end of the list".

    Fix: every parameter shared the name ``lowercase_`` (a SyntaxError).
    """
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def UpperCamelCase ( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    """Insert ``item`` into ``sorted_collection`` at its leftmost sorted position.

    Fixes: duplicate ``lowercase_`` parameters (SyntaxError) and a call to the vanished
    name ``bisect_left`` — the stdlib ``bisect`` module (imported at file top) is used
    instead; the ``hi < 0`` sentinel is resolved before the call since the stdlib does
    not accept it.
    """
    if hi < 0:
        hi = len(sorted_collection )
    sorted_collection.insert(bisect.bisect_left(sorted_collection , item , lo , hi ) , item )
def UpperCamelCase ( sorted_collection: list[int] , item: int , lo: int = 0 , hi: int = -1 ) -> None:
    """Insert ``item`` into ``sorted_collection`` at its rightmost sorted position.

    Fixes: duplicate ``lowercase_`` parameters (SyntaxError) and a call to the vanished
    name ``bisect_right`` — the stdlib ``bisect`` module is used instead, with the
    ``hi < 0`` sentinel resolved up front.
    """
    if hi < 0:
        hi = len(sorted_collection )
    sorted_collection.insert(bisect.bisect_right(sorted_collection , item , lo , hi ) , item )
def UpperCamelCase ( sorted_collection: list[int] , item: int ) -> int | None:
    """Iterative binary search: return an index of ``item`` in ``sorted_collection``
    (sorted ascending), or ``None`` when absent.

    Fix: both parameters shared the name ``lowercase_`` (a SyntaxError).
    """
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        if item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def UpperCamelCase ( sorted_collection: list[int] , item: int ) -> int | None:
    """Binary search via the stdlib: return an index of ``item`` or ``None``.

    Fix: both parameters shared the name ``lowercase_`` (a SyntaxError).
    """
    index = bisect.bisect_left(sorted_collection , item )
    # bisect_left returns the insertion point; it is a hit only when in range and equal.
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def UpperCamelCase ( sorted_collection: list[int] , item: int , left: int , right: int ) -> int | None:
    """Recursive binary search over ``sorted_collection[left..right]`` (inclusive);
    return an index of ``item`` or ``None``.

    Fixes: all four parameters shared the name ``lowercase_`` (a SyntaxError) and the
    recursive calls targeted the vanished name ``binary_search_by_recursion``; the
    recursion now goes through a nested helper so it cannot be broken by module-level
    shadowing.
    """
    def _search(lo: int , hi: int ) -> int | None:
        if hi < lo:
            return None
        midpoint = lo + (hi - lo) // 2
        if sorted_collection[midpoint] == item:
            return midpoint
        if sorted_collection[midpoint] > item:
            return _search(lo , midpoint - 1 )
        return _search(midpoint + 1 , hi )

    return _search(left , right )
if __name__ == "__main__":
    # Fixes: the locals (user_input/collection/target/result) had all been collapsed
    # onto ``_UpperCAmelCase`` and ``binary_search`` no longer exists under that name
    # (every helper above was renamed to ``UpperCamelCase`` and they shadow each
    # other), so the stdlib ``bisect`` module is used directly for an equivalent
    # lookup.
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    index = bisect.bisect_left(collection, target)
    result = index if index != len(collection) and collection[index] == target else None
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 72 |
from typing import TYPE_CHECKING

from ....utils import _LazyModule

# Map of submodules to the public names they re-export (originally ``_import_structure``).
_lowercase = {'''tokenization_tapex''': ['''TapexTokenizer''']}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the heavy tokenizer import only happens
    # on first attribute access.  Fixes: the renamed version referenced the undefined
    # ``_import_structure`` and dropped the ``sys.modules[__name__]`` assignment target,
    # so it raised NameError and never installed the proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _lowercase)
| 659 | 0 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowerCamelCase__ (_UpperCAmelCase):
    """Return True when the input is a perfect square.

    Fix: the body referenced the undefined names ``number`` and ``sq`` (NameError on
    every call); it now uses the actual parameter.
    """
    root = int(_UpperCAmelCase**0.5)
    return _UpperCAmelCase == root * root
def lowerCamelCase__ (x_num, x_den, y_num, y_den, z_num, z_den):
    """Add the three fractions x_num/x_den + y_num/y_den + z_num/z_den and return the
    reduced (numerator, denominator) pair.

    Fix: all six parameters shared the name ``_UpperCAmelCase`` (a SyntaxError); the
    names are restored from how the body uses them.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def lowerCamelCase__ (_UpperCAmelCase = 35):
    """Enumerate fractions x = x_num/x_den and y = y_num/y_den with denominators up to
    ``order`` and, for each exponent n in {1, 2, -1, -2} (per the case labels in the
    original), solve x^n + y^n = z^n for a fraction z with denominator <= ``order``.
    Sum x + y + z over the distinct solutions and return numerator + denominator of
    that sum.

    Fixes: the renamed version referenced the undefined names ``unique_s``, ``total``,
    ``is_sq`` and ``add_three`` (the helpers above were renamed and shadow each other),
    so self-contained copies are nested here.
    """
    order = _UpperCAmelCase

    def _is_square(value):
        # Perfect-square test (inlined copy of the helper above).
        root = int(value**0.5)
        return value == root * root

    def _add_three(x_num, x_den, y_num, y_den, z_num, z_den):
        # Reduced sum of the three fractions (inlined copy of the helper above).
        top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
        bottom = x_den * y_den * z_den
        hcf = gcd(top, bottom)
        return top // hcf, bottom // hcf

    unique_s = set()
    total = Fraction(0)
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        unique_s.add(_add_three(x_num, x_den, y_num, y_den, z_num, z_den))
                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if _is_square(z_num) and _is_square(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            unique_s.add(_add_three(x_num, x_den, y_num, y_den, z_num, z_den))
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        unique_s.add(_add_three(x_num, x_den, y_num, y_den, z_num, z_den))
                    # n=-2 (the original comment said "n=2" again; presumably -2 — the
                    # numerator/denominator roles are mirrored relative to the n=2 case)
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if _is_square(z_num) and _is_square(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            unique_s.add(_add_three(x_num, x_den, y_num, y_den, z_num, z_den))
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
    # ``solution`` was renamed to ``lowerCamelCase__`` above; the old name raised NameError.
    print(f"""{lowerCamelCase__() = }""")
| 73 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# NOTE(review): the automated renaming collapsed four distinct module constants
# (the diffusers source path, the repo path, the module spec, and the loaded
# diffusers module) onto the single name ``_lowercase`` — each assignment below
# overwrites the previous one, and the names ``DIFFUSERS_PATH`` and ``spec``
# referenced further down are undefined (NameError at import time).
_lowercase = '''src/diffusers'''
_lowercase = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
_lowercase = importlib.util.spec_from_file_location(
    '''diffusers''',
    os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
    submodule_search_locations=[DIFFUSERS_PATH],
)
_lowercase = spec.loader.load_module()
def UpperCamelCase ( line , indent):
    """Return True while ``line`` still belongs to the definition indented at ``indent``:
    it is further indented, effectively blank, or closes a signature (``)`` / ``) -> ...:``).

    Fix: both parameters shared the name ``snake_case__`` (a SyntaxError) and the body
    referenced the undefined ``line``.
    """
    return line.startswith(indent) or len(line) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , line) is not None
def UpperCamelCase ( snake_case__):
    """Return the source of ``object_name`` (e.g. ``models.unet.UNetModel``) read from the
    local ``src/diffusers`` tree.

    Fixes: the body referenced the undefined names ``object_name``, ``module``,
    ``parts``, ``lines``, ``indent`` and ``start_index``; the path constant and the
    ``_should_continue`` helper lost their module-level names, so a literal path and a
    nested copy of the helper are used here.
    """
    object_name = snake_case__
    diffusers_path = "src/diffusers"  # originally the module constant DIFFUSERS_PATH

    def _should_continue(line , indent):
        # A definition continues while lines are indented deeper, blank, or close a signature.
        return line.startswith(indent) or len(line) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , line) is not None

    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(diffusers_path , F'''{module}.py''')):
        i += 1
        if i < len(parts):
            module = os.path.join(module , parts[i])
    if i >= len(parts):
        raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''')
    with open(os.path.join(diffusers_path , F'''{module}.py''') , "r" , encoding="utf-8" , newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index]) is None
        ):
            line_index += 1
        # One extra indent level per nesting step (class body, then method body, ...).
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(F''' {object_name} does not match any function or class in {module}.''')
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index] , indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
# NOTE(review): three distinct compiled patterns (the "Copied from" marker, the
# ``old->new`` replace pattern, and — presumably — a fill placeholder) were all
# renamed to ``_lowercase``; each assignment overwrites the previous one, and the
# original names (``_re_copy_warning`` / ``_re_replace_pattern``) used by the
# functions in this file no longer resolve.
_lowercase = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_lowercase = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_lowercase = re.compile(r'''<FILL\s+[^>]*>''')
def UpperCamelCase ( snake_case__):
    """Return the leading whitespace of the first non-empty line of the code snippet,
    or "" when the snippet is empty.

    Fix: the body referenced the undefined names ``code`` and ``lines``.
    """
    lines = snake_case__.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(R"^(\s*)\S" , lines[idx]).groups()[0]
    return ""
def UpperCamelCase ( snake_case__):
    """Reformat a code snippet with ``black`` (plus docstring styling), temporarily
    wrapping indented snippets in a dummy class so black accepts them.

    Fixes: the body referenced the undefined ``get_indent``/``result`` names and passed
    the code itself as ``preview=``; the target version had been mangled to ``PYaa``.
    """
    code = snake_case__
    # Inlined copy of ``get_indent`` (its module-level name was lost to the renaming).
    code_lines = code.split("\n")
    idx = 0
    while idx < len(code_lines) and len(code_lines[idx]) == 0:
        idx += 1
    indent = re.search(R"^(\s*)\S" , code_lines[idx]).groups()[0] if idx < len(code_lines) else ""
    has_indent = len(indent) > 0
    if has_indent:
        code = F'''class Bla:\n{code}'''
    # NOTE(review): ``PY37`` matches the upstream diffusers script; confirm before relying on it.
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_19 , preview=True)
    result = black.format_str(code , mode=mode)
    result , _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def UpperCamelCase ( filename , overwrite=False):
    """Check every ``# Copied from diffusers....`` block in ``filename`` against its
    source in ``src/diffusers``; return ``[[object_name, start_index], ...]`` for each
    out-of-sync copy and rewrite the file in place when ``overwrite`` is True.

    Fixes: the two parameters shared one name (a SyntaxError), every helper/regex/local
    referenced an undefined name, and one replace step substituted a pattern with
    itself.  Since the module-level helpers and regexes lost their names to the
    renaming, self-contained copies are nested here.
    """
    _re_copy_warning = re.compile(R"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
    _re_replace_pattern = re.compile(R"^\s*(\S+)->(\S+)(\s+.*|$)")
    diffusers_path = "src/diffusers"  # originally the module constant DIFFUSERS_PATH

    def _should_continue(line , indent):
        return line.startswith(indent) or len(line) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , line) is not None

    def _get_indent(code):
        code_lines = code.split("\n")
        idx = 0
        while idx < len(code_lines) and len(code_lines[idx]) == 0:
            idx += 1
        if idx < len(code_lines):
            return re.search(R"^(\s*)\S" , code_lines[idx]).groups()[0]
        return ""

    def _find_code_in_diffusers(object_name):
        # Locate the module file, then the (possibly nested) class/function body.
        parts = object_name.split(".")
        i = 0
        module = parts[i]
        while i < len(parts) and not os.path.isfile(os.path.join(diffusers_path , F'''{module}.py''')):
            i += 1
            if i < len(parts):
                module = os.path.join(module , parts[i])
        if i >= len(parts):
            raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''')
        with open(os.path.join(diffusers_path , F'''{module}.py''') , "r" , encoding="utf-8" , newline="\n") as f:
            src_lines = f.readlines()
        indent = ""
        line_idx = 0
        for name in parts[i + 1 :]:
            while (
                line_idx < len(src_lines)
                and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , src_lines[line_idx]) is None
            ):
                line_idx += 1
            indent += "    "
            line_idx += 1
        if line_idx >= len(src_lines):
            raise ValueError(F''' {object_name} does not match any function or class in {module}.''')
        start = line_idx
        while line_idx < len(src_lines) and _should_continue(src_lines[line_idx] , indent):
            line_idx += 1
        while len(src_lines[line_idx - 1]) <= 1:
            line_idx -= 1
        return "".join(src_lines[start:line_idx])

    def _blackify(code):
        has_indent = len(_get_indent(code)) > 0
        if has_indent:
            code = F'''class Bla:\n{code}'''
        # PY37 matches the upstream script (the renaming had mangled it to ``PYaa``).
        mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_19 , preview=True)
        result = black.format_str(code , mode=mode)
        result , _ = style_docstrings_in_code(result)
        return result[len("class Bla:\n") :] if has_indent else result

    with open(filename , "r" , encoding="utf-8" , newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = _find_code_in_diffusers(object_name)
        theoretical_indent = _get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent) and re.search(F'''^{indent}# End copy''' , line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code_lines = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code_lines)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with" , "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja , objb , option = pattern.groups()
                theoretical_code = re.sub(obja , objb , theoretical_code)
                if option.strip() == "all-casing":
                    # Bug fix: the renamed version substituted obja with itself (a no-op).
                    theoretical_code = re.sub(obja.lower() , objb.lower() , theoretical_code)
                    theoretical_code = re.sub(obja.upper() , objb.upper() , theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = _blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.  The renamed version printed the
        # literal "(unknown)" where the filename belongs.
        print(F'''Detected changes, rewriting {filename}.''')
        with open(filename , "w" , encoding="utf-8" , newline="\n") as f:
            f.writelines(lines)
    return diffs
def UpperCamelCase ( snake_case__ = False):
    """Run the copy-consistency check on every Python file under ``src/diffusers`` and
    raise when inconsistencies are found (unless ``snake_case__``/overwrite is True, in
    which case files are fixed in place).

    Fixes: the overwrite flag was being used both as the glob path and as the
    ``recursive`` flag, and ``diffs`` was undefined.
    """
    overwrite = snake_case__
    all_files = glob.glob(os.path.join("src/diffusers" , "**/*.py") , recursive=True)
    diffs = []
    for filename in all_files:
        # NOTE(review): the original called ``is_copy_consistent(filename, overwrite)``;
        # that helper was renamed to ``UpperCamelCase`` and is shadowed by this very
        # function, so it is unreachable by name — restore the helper's name before
        # running this script.
        new_diffs = is_copy_consistent(filename , overwrite)
        diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
    # Fixes: ``parser``/``args`` were undefined (everything had been assigned to
    # ``_lowercase``) and ``check_copies`` no longer exists under that name — the last
    # definition above (``UpperCamelCase``) is the renamed ``check_copies``.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
    UpperCamelCase(args.fix_and_overwrite)
| 659 | 0 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __UpperCamelCase ( lowerCAmelCase__ if False else BaseOutput ):
    """
    Output container for the temporal transformer model below.

    NOTE(review): the base-class name and the field were destroyed by the automated
    renaming (``lowerCAmelCase_ = 42``); ``BaseOutput`` is imported at the top of this
    file, and the model's forward constructs this class with the keyword
    ``sample=...``, so the field must be a dataclass field named ``sample``.
    """

    sample: torch.FloatTensor
class __UpperCamelCase ( ModelMixin , ConfigMixin ):
    """
    A transformer model that attends over the temporal (frame) axis of video latents.

    NOTE(review): reconstructed — the automated renaming had given every parameter of
    both signatures the same name ``_A`` (a SyntaxError) and left the base classes
    undefined (``ModelMixin``/``ConfigMixin`` are imported at the top of this file).
    Parameter names are recovered from how the bodies use them.
    """

    @register_to_config
    def __init__(
        self ,
        num_attention_heads: int = 16 ,
        attention_head_dim: int = 88 ,
        in_channels=None ,
        out_channels=None ,
        num_layers: int = 1 ,
        dropout: float = 0.0 ,
        norm_num_groups: int = 32 ,
        cross_attention_dim=None ,
        attention_bias: bool = False ,
        sample_size=None ,
        activation_fn: str = "geglu" ,
        norm_elementwise_affine: bool = True ,
        double_self_attention: bool = True ,
    ):
        """Build the group norm, in/out projections and the temporal transformer blocks."""
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups , num_channels=in_channels , eps=1e-6 , affine=True )
        self.proj_in = nn.Linear(in_channels , inner_dim )
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , cross_attention_dim=cross_attention_dim , activation_fn=activation_fn , attention_bias=attention_bias , double_self_attention=double_self_attention , norm_elementwise_affine=norm_elementwise_affine , )
                for _ in range(num_layers )
            ] )
        self.proj_out = nn.Linear(inner_dim , in_channels )

    def UpperCAmelCase__ (
        self ,
        hidden_states ,
        encoder_hidden_states=None ,
        timestep=None ,
        class_labels=None ,
        num_frames=1 ,
        cross_attention_kwargs=None ,
        return_dict: bool = True ,
    ):
        """Fold frames into the sequence axis, run the transformer blocks over time, then
        unfold back to (batch*frames, channel, height, width) and add the residual."""
        batch_frames , channel , height , width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size , num_frames , channel , height , width )
        hidden_states = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        hidden_states = self.norm(hidden_states )
        # (batch*height*width, num_frames, channel): each spatial location is a sequence over time.
        hidden_states = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , num_frames , channel )
        hidden_states = self.proj_in(hidden_states )
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states , encoder_hidden_states=encoder_hidden_states , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , class_labels=class_labels , )
        # 3. Output
        hidden_states = self.proj_out(hidden_states )
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size , height , width , num_frames , channel )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames , channel , height , width )
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        # NOTE(review): ``TransformerTemporalModelOutput`` does not exist under that name
        # in this file (the output dataclass above was renamed to ``__UpperCamelCase``
        # and is shadowed by this class) — restore the alias before use.
        return TransformerTemporalModelOutput(sample=output )
| 74 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class __snake_case ( PretrainedConfig ):
    """
    Configuration for a Swinv2 model; defaults follow
    microsoft/swinv2-tiny-patch4-window8-256.

    NOTE(review): reconstructed — the automated renaming had given every ``__init__``
    parameter the same name (a SyntaxError), left the ``PretrainedConfig`` base (imported
    at the top of this file) undefined, and collapsed the two class attributes onto one
    name so the first was silently overwritten.
    """

    # Originally ``model_type`` and ``attribute_map`` (both read by PretrainedConfig).
    model_type = 'swinv2'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self ,
        image_size=2_24 ,
        patch_size=4 ,
        num_channels=3 ,
        embed_dim=96 ,
        depths=[2, 2, 6, 2] ,
        num_heads=[3, 6, 12, 24] ,
        window_size=7 ,
        mlp_ratio=4.0 ,
        qkv_bias=True ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        drop_path_rate=0.1 ,
        hidden_act="gelu" ,
        use_absolute_embeddings=False ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-5 ,
        encoder_stride=32 ,
        **kwargs ,
    ):
        """Store the architecture hyper-parameters.  The mutable list defaults mirror the
        original defaults and are only stored, never mutated."""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        # presumably the per-stage pretrained window sizes — confirm the attribute name
        # against upstream (the renamed version dropped the assignment target).
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 659 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( FlavaImageProcessor ):
    """Deprecated alias for ``FlavaImageProcessor`` kept for backward compatibility."""

    def __init__( self , *args , **kwargs ):
        """Emit the deprecation warning, then defer entirely to FlavaImageProcessor.

        Fixes: the ``*``/``**`` parameters shared one name (a SyntaxError), the base
        class name ``__a`` was undefined (``FlavaImageProcessor`` is imported at the top
        of this file), and the warning-category argument had been replaced by the
        argument tuple.  ``FutureWarning`` matches the standard HF deprecation pattern —
        confirm against upstream.
        """
        warnings.warn(
            '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use FlavaImageProcessor instead.''' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 75 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
# NOTE(review): this was the module-level ``logger``; the automated renaming collapsed
# it to ``_lowercase``, so ``logger.warning`` inside the class below cannot resolve.
_lowercase = logging.get_logger(__name__)
class __snake_case ( SequenceFeatureExtractor ):
    """
    Speech feature extractor that turns raw mono waveforms into per-frame MFSC
    (log-mel spectrogram) features, with optional per-utterance mean/variance
    normalization and padding to a batch.

    NOTE(review): reconstructed — the automated renaming had collapsed every parameter
    of each multi-parameter signature onto one name (a SyntaxError), left the
    ``SequenceFeatureExtractor`` base (imported at the top of this file) undefined, gave
    three different methods the same name so they shadowed each other, and dropped
    several assignment targets.  Names below are recovered from how the bodies use them.
    """

    # Originally ``model_input_names`` (read by the SequenceFeatureExtractor base).
    model_input_names = ['input_features', 'attention_mask']

    def __init__(
        self ,
        feature_size=80 ,
        sampling_rate=1_60_00 ,
        padding_value=0.0 ,
        hop_length=10 ,
        win_length=25 ,
        win_function="hamming_window" ,
        frame_signal_scale=32_768.0 ,
        preemphasis_coeff=0.97 ,
        mel_floor=1.0 ,
        normalize_means=True ,
        normalize_vars=True ,
        return_attention_mask=False ,
        **kwargs ,
    ):
        """Store the feature parameters and precompute the FFT geometry from the window."""
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        # Window/stride lengths are given in milliseconds; convert to samples.
        self.sample_size = win_length * sampling_rate // 10_00
        self.sample_stride = hop_length * sampling_rate // 10_00
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features( self , one_waveform ):
        """Compute the (num_frames, feature_size) MFSC features for one waveform."""
        if self.win_function == "hamming_window":
            # ``periodic`` had been replaced by the waveform itself; False matches upstream —
            # confirm before relying on it.
            window = window_function(window_length=self.sample_size , name=self.win_function , periodic=False )
        else:
            window = window_function(window_length=self.sample_size , name=self.win_function )
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale , window=window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=False , preemphasis=self.preemphasis_coeff , mel_filters=fbanks , mel_floor=self.mel_floor , log_mel="log" , )
        return msfc_features.T

    def _normalize_one( self , x , input_length , padding_value ):
        """Mean/variance-normalize the first ``input_length`` frames of ``x`` and reset
        the padding frames to ``padding_value``."""
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x , mean )
        if self.normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x , std )
        if input_length < x.shape[0]:
            # The renamed version dropped this assignment target entirely.
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x

    def normalize( self , input_features , attention_mask=None ):
        """Normalize each utterance over its unpadded length (taken from the attention
        mask when provided, otherwise the full array length)."""
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x , n , self.padding_value ) for x, n in zip(input_features , lengths )]

    def __call__(
        self ,
        raw_speech ,
        padding=False ,
        max_length=None ,
        truncation=False ,
        pad_to_multiple_of=None ,
        return_attention_mask=None ,
        return_tensors=None ,
        sampling_rate=None ,
        **kwargs ,
    ):
        """Featurize one waveform or a batch of waveforms and return a padded
        ``BatchFeature`` (optionally converted to tensors)."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            # The module ``logger`` lost its name to the renaming; fetch it directly.
            logging.get_logger(__name__ ).warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            # The mangling obscured the dtypes here; float64 -> float32 matches the usual
            # HF pattern — confirm before relying on it.
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features} )
        padded_inputs = self.pad(
            encoded_inputs , padding=padding , max_length=max_length , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , **kwargs , )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features" )
        if isinstance(input_features[0] , list ):
            padded_inputs["input_features"] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array , dtype=np.int32 ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask , dtype=np.int32 )
                if self._get_padding_strategies(padding , max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"] , attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
| 659 | 0 |
"""simple docstring"""
import os
import sys
a_ = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
a_ = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def __UpperCAmelCase ( *args , **kwargs ):
    # Drop-in wrapper around AutoConfig.from_pretrained (docstring copied by the
    # decorator).  Fix: the * and ** parameters shared one name — a SyntaxError.
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __UpperCAmelCase ( *args , **kwargs ):
    # Drop-in wrapper around AutoTokenizer.from_pretrained.
    # Fix: the * and ** parameters shared one name — a SyntaxError.
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def __UpperCAmelCase ( *args , **kwargs ):
    # Drop-in wrapper around AutoModel.from_pretrained.
    # Fix: the * and ** parameters shared one name — a SyntaxError.
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __UpperCAmelCase ( *args , **kwargs ):
    # Drop-in wrapper around AutoModelForCausalLM.from_pretrained.
    # Fix: the * and ** parameters shared one name — a SyntaxError.
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __UpperCAmelCase ( *args , **kwargs ):
    # Drop-in wrapper around AutoModelForMaskedLM.from_pretrained.
    # Fix: the * and ** parameters shared one name — a SyntaxError.
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __UpperCAmelCase ( *args , **kwargs ):
    # Drop-in wrapper around AutoModelForSequenceClassification.from_pretrained.
    # Fix: the * and ** parameters shared one name — a SyntaxError.
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __UpperCAmelCase ( *args , **kwargs ):
    # Drop-in wrapper around AutoModelForQuestionAnswering.from_pretrained.
    # Fix: the * and ** parameters shared one name — a SyntaxError.
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
| 76 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
# NOTE(review): this constant was originally named ``precision``; the automated
# renaming collapsed it to ``_lowercase``, and the __main__ block below reassigns
# ``_lowercase`` repeatedly, so the search functions can no longer rely on it.
_lowercase = 10
def UpperCamelCase ( left , right , array , target):
    """Linear search for ``target`` in ``array[left:right]``; return its index or -1.

    Fix: all four parameters shared the name ``snake_case__`` (a SyntaxError) and the
    body referenced the undefined ``array``/``target``.
    """
    for i in range(left , right):
        if array[i] == target:
            return i
    return -1
return -1
def UpperCamelCase ( array , target):
    """Iterative ternary search: return an index of ``target`` in ``array`` (sorted
    ascending) or -1.

    Fixes: the two parameters shared one name (a SyntaxError), ``precision`` and
    ``lin_search`` lost their module-level names, and the third-point formulas
    ``(left + right) // 3 + 1`` could leave the [left, right] window entirely
    (IndexError / missed elements); the split points are now kept inside the window.
    """
    precision = 10  # originally the module constant ``precision`` (see header above)

    def _lin_search(lo , hi):
        # Fallback linear scan of array[lo:hi] once the window is small.
        for i in range(lo , hi):
            if array[i] == target:
                return i
        return -1

    left = 0
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            return _lin_search(left , right + 1)
        third = (right - left) // 3
        one_third = left + third
        two_third = right - third
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def UpperCamelCase ( left , right , array , target):
    """Recursive ternary search of ``array[left..right]`` (inclusive, sorted ascending);
    return an index of ``target`` or -1.

    Fixes: all four parameters shared one name (a SyntaxError), the recursive calls
    targeted the vanished name ``rec_ternary_search``, ``precision``/``lin_search``
    lost their module-level names, and the original third-point formulas could leave
    the window.  The recursion now goes through a nested helper.
    """
    precision = 10  # originally the module constant ``precision`` (see header above)

    def _lin_search(lo , hi):
        for i in range(lo , hi):
            if array[i] == target:
                return i
        return -1

    def _search(lo , hi):
        if hi < lo:
            return -1
        if hi - lo < precision:
            return _lin_search(lo , hi + 1)
        third = (hi - lo) // 3
        one_third = lo + third
        two_third = hi - third
        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            return _search(lo , one_third - 1)
        if array[two_third] < target:
            return _search(two_third + 1 , hi)
        return _search(one_third + 1 , two_third - 1)

    return _search(left , right)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): the automated renaming collapsed every local onto ``_lowercase``
    # while the reads below still use the original names (``user_input``,
    # ``collection``, ``target``, ``resulta``) — each read raises NameError — and
    # both search helpers above now share the name ``UpperCamelCase`` so
    # ``ite_ternary_search``/``rec_ternary_search`` no longer resolve.  Restore the
    # distinct names before running this script.
    _lowercase = input('''Enter numbers separated by comma:\n''').strip()
    _lowercase = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    _lowercase = int(input('''Enter the number to be found in the list:\n''').strip())
    _lowercase = ite_ternary_search(collection, target)
    _lowercase = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(f"Iterative search: {target} found at positions: {resulta}")
        print(f"Recursive search: {target} found at positions: {resulta}")
    else:
        print('''Not found''')
| 659 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Prefix pairs used to translate original VisualBERT parameter names into the
# HF `transformers` naming scheme (consumed by `get_new_dict` below).
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

# File names of the original checkpoints this converter knows how to handle.
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """Load a raw torch checkpoint onto CPU and return its state dict."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Rename the keys of state dict ``d`` into the HF naming scheme.

    Detector weights are dropped; every other key has each ``(old, new)``
    prefix pair from ``rename_keys_prefix`` applied via substring replace.
    ``config`` is kept for signature compatibility; it is not used here.
    """
    new_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # Object-detector weights are not part of the HF VisualBERT model.
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but it was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT ``.th`` checkpoint to HF format.

    The model variant (pretraining / vqa / nlvr / multichoice) and its
    visual-embedding dimension are inferred from the checkpoint file name.
    The converted model is saved under ``pytorch_dump_folder_path``.
    """
    if checkpoint_path.split("/")[-1] not in ACCEPTABLE_CHECKPOINTS:
        # Explicit raise instead of `assert`, which is stripped under -O.
        raise ValueError(f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.")

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"
        else:
            # Without this branch `model_type` would be unbound below.
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    else:
        # e.g. "vqa_advanced": no dedicated head class is available here.
        raise NotImplementedError(f"No model class found for `{model_type}`.")

    model.load_state_dict(new_state_dict)

    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 77 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

# Standard tokenizer resource file names.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

# Download locations of the pretrained resources.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

# Maximum input lengths per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}
class __snake_case(PreTrainedTokenizerFast):
    """Fast (byte-level BPE) tokenizer for BlenderbotSmall, backed by `tokenizers`.

    The class-level attributes below are the contract expected by
    ``PreTrainedTokenizerFast`` (resource names, download map, max lengths,
    and the matching slow tokenizer class).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Wrap one or two sequences with BOS/EOS special tokens."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_a + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Return all-zero token type ids (this model does not use them)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]
| 659 | 0 |
'''simple docstring'''
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def gen_gaussian_kernel(k_size, sigma):
    """Build an (unnormalized) k_size x k_size Gaussian kernel with std ``sigma``."""
    center = k_size // 2
    # Integer coordinate grids centered on the kernel middle.
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    """Apply a k_size x k_size Gaussian filter to a 2-D grayscale ``image``.

    Uses an im2col layout: every k_size*k_size window becomes one row, and a
    single matrix-vector product applies the kernel to all windows at once.
    Returns a uint8 image of shape (H - k_size + 1, W - k_size + 1).
    """
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col: turn the k_size*k_size pixels of each window into one row
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape (k*k,)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uinta)
    return dst
if __name__ == "__main__":
    # read original image
    img = imread(r'../image_data/lena.jpg')
    # turn image into gray-scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask sizes
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow('gaussian filter with 3x3 mask', gaussian3x3)
    imshow('gaussian filter with 5x5 mask', gaussian5x5)
    waitKey()
| 78 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """Reverse the order of the four 8-character groups in a 32-char byte string.

    Raises ValueError when the input is not exactly 32 bytes long.
    """
    if len(string_aa) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Format ``i`` as 8 hex digits with the byte pairs in little-endian order."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Apply MD5 padding: message bits + '1' + zeros + 64-bit length.

    The result length is a multiple of 512 (as an ASCII bit string).
    """
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    # Append original length, low word first (little-endian per MD5 spec).
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes):
    """Yield each 512-bit block of ``bit_string`` as 16 little-endian 32-bit ints."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_aa(i: int) -> int:
    """Bitwise NOT of a non-negative integer, restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
    """Add two integers modulo 2**32 (32-bit wrap-around addition)."""
    return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
    """Rotate a 32-bit integer ``i`` left by ``shift`` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Compute the MD5 digest of ``message`` and return it as hex bytes.

    Implements RFC 1321: pad the message, then run 64 rounds per 512-bit
    block, mixing the block words into the four 32-bit state registers.
    """
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    aa = 0x67_45_23_01
    ba = 0xEF_CD_AB_89
    ca = 0x98_BA_DC_FE
    da = 0x10_32_54_76

    # Per-round left-rotation amounts (RFC 1321 table).
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))

        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)

    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
if __name__ == "__main__":
    # Run the module's doctests when this file is executed directly.
    import doctest
    doctest.testmod()
| 659 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    """Download the BLIP demo image and return it as a (1, 3, H, W) tensor on ``device``."""
    img_url = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"""
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("""RGB""")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            # CLIP-style normalization constants.
            transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073), (0.26_862_954, 0.26_130_258, 0.27_577_711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    """Map a BLIP state-dict key to its HF `transformers` equivalent.

    Each substring test triggers an in-place regex substitution; unmatched
    keys are returned unchanged.
    """
    if "visual_encoder" in key:
        key = re.sub("""visual_encoder*""", """vision_model.encoder""", key)
    if "blocks" in key:
        key = re.sub(r"""blocks""", """layers""", key)
    if "attn" in key:
        key = re.sub(r"""attn""", """self_attn""", key)
    if "norm1" in key:
        key = re.sub(r"""norm1""", """layer_norm1""", key)
    if "norm2" in key:
        key = re.sub(r"""norm2""", """layer_norm2""", key)
    if "encoder.norm" in key:
        key = re.sub(r"""encoder.norm""", """post_layernorm""", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"""encoder.patch_embed.proj""", """embeddings.patch_embedding""", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"""encoder.pos_embed""", """embeddings.position_embedding""", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"""encoder.cls_token""", """embeddings.class_embedding""", key)
    if "self_attn" in key:
        key = re.sub(r"""self_attn.proj""", """self_attn.projection""", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path=None, config_path=None):
    """Convert the original BLIP captioning/VQA/ITM checkpoints to HF format.

    Downloads each original checkpoint, renames its keys with ``rename_key``,
    sanity-checks the converted models against hard-coded reference outputs,
    and (when ``pytorch_dump_folder_path`` is given) saves them with ``_vqa``
    and ``_itm`` suffixes for the latter two.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"""
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="""base""")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="""cpu""")
    tokenizer = BertTokenizer.from_pretrained("""bert-base-uncased""")
    input_ids = tokenizer(["""a picture of"""]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"""
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="""base""")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["""How many dogs are in this image?"""]
    question_input_ids = tokenizer(question, return_tensors="""pt""").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + """_vqa""")

    model_url = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"""
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="""base""")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["""A picture of a woman with a dog sitting in a beach"""]
    question_input_ids = tokenizer(
        question, return_tensors="""pt""", padding="""max_length""", truncation=True, max_length=35
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2_110_687_494_277_954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45_698_845_386_505_127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + """_itm""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
    # Only the two arguments declared above exist on `args`.
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 79 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
    from apex import amp

if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    # Native torch AMP (autocast) is available from torch 1.6 on.
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pre-train.

    Defaults reconstructed from the upstream wav2vec2 pretraining example —
    NOTE(review): confirm against the original script.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={'help': 'Whether to log verbose messages or not.'},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={'help': 'Maximum temperature for gumbel softmax.'}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={'help': 'Minimum temperature for gumbel softmax.'}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.99_99_95, metadata={'help': 'Decay of gumbel temperature during training.'}
    )
def configure_logger(model_args, training_args):
    """Configure root logging and pick a level from the parsed arguments.

    DEBUG when ``verbose_logging`` is set, INFO on the main process,
    WARNING everywhere else.
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: str = field(
        default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    train_split_name: Optional[str] = field(
        default='train',
        metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        },
    )
    validation_split_name: Optional[str] = field(
        default='validation',
        metadata={
            'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default='file',
        metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''},
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={'help': 'The number of processes to use for the preprocessing.'},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    """Dynamically pad batched inputs and sample masked indices for wav2vec2 pretraining.

    Attributes:
        model: the Wav2Vec2 pretraining model (supplies feat-extract output
            lengths and the time-masking config).
        feature_extractor: used to pad ``input_values`` to a common length.
        padding / max_length / pad_to_multiple_of: forwarded to
            ``feature_extractor.pad``.
    """

    model: "WavaVecaForPreTraining"
    feature_extractor: "WavaVecaFeatureExtractor"
    padding: Union[bool, str] = "longest"
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            # NOTE(review): the scatter line was lost in this copy and is
            # reconstructed from the upstream script — confirm.
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class WavaVecaPreTrainer(Trainer):
    """Trainer subclass that decays the gumbel-softmax temperature each update step."""

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model, inputs):
        """Run one forward/backward pass and update the gumbel temperature.

        Returns the detached loss tensor.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''')

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed (but never below the minimum)
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
def main():
    """Parse CLI args, build the dataset/model/collator and run wav2vec2 pretraining."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''',
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''',
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f'''{data_args.train_split_name}''',
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'")

    model = WavaVecaForPreTraining(config)

    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)

    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
    # Script entry point: run the full wav2vec2 pretraining pipeline.
    main()
| 659 | 0 |
# Author credit for the original implementation.
__UpperCamelCase : str = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully-parenthesized infix expression with Dijkstra's
    two-stack algorithm.

    :param equation: fully parenthesized expression with single-digit
        operands, e.g. ``"(5 + ((4 * 2) * (2 + 3)))"``
    :return: the value of the expression (45 for the example above)
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on ')', pop one operator and two operands,
            # apply the operator and push the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)
    # RULE 5: the single remaining operand is the answer
    return operand_stack.peek()
if __name__ == "__main__":
    # Demo: evaluate a fully parenthesized expression.
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 80 |
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Approximate the area between the curve ``fnc``, the x axis, and the
    vertical lines ``x = x_start`` and ``x = x_end`` using the trapezoidal rule.

    :param fnc: function whose curve bounds the area
    :param x_start: left integration limit
    :param x_end: right integration limit
    :param steps: number of trapezoids; more steps -> better approximation
    :return: approximated (unsigned) area
    """
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximate each small segment of the curve as linear and
        # accumulate the trapezoid's area.
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        area += abs(fxb + fxa) * (xb - xa) / 2
        # Advance to the next segment.
        xa = xb
        fxa = fxb
    return area
if __name__ == "__main__":

    def f(x):
        """Example integrand: f(x) = x^3 + x^2."""
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
| 659 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger from the transformers logging wrapper.
_snake_case : "logging.Logger" = logging.get_logger(__name__)
# Map of pretrained X-MOD checkpoints to their hosted config files.
# NOTE(review): this rebinds `_snake_case`, shadowing the logger bound just
# above — presumably these had distinct names originally; verify before use.
_snake_case : dict = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class a (PretrainedConfig ):
    """Configuration for X-MOD models; stores the architecture hyper-parameters.

    NOTE(review): the obfuscated source reused one parameter name for every
    argument (a SyntaxError) and discarded every value into a local variable;
    parameter/attribute names were restored from the upstream ``XmodConfig``
    and from the imported ``PretrainedConfig`` base.
    """

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # X-MOD specific: pre-layer-norm and language-adapter settings.
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig (OnnxConfig ):
    """ONNX export configuration: declares the dynamic axes of the encoder inputs.

    NOTE(review): renamed from the obfuscated ``a`` (which shadowed the config
    class of the same name) and the property restored to ``inputs`` so it
    actually overrides the imported ``OnnxConfig`` base property.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice tasks carry an extra per-choice axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 81 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
# Make torch/cuDNN deterministic so the hard-coded slices below are reproducible.
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    """Fast CPU tests for ``StableDiffusionLDMaDPipeline`` built from tiny dummy models.

    NOTE(review): the obfuscated source gave all three module-level test
    classes one shared name (so only the last survived) and every method the
    same name, while bodies call ``self.get_dummy_components`` /
    ``self.get_dummy_inputs``; names restored from the upstream diffusers test.
    """

    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a minimal set of pipeline components (tiny UNet / VAE / CLIP)."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline call kwargs for the given device."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ldm3d_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward with a plain string prompt
        output = ldmad_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = ldmad_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldmad_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds
        # forward with precomputed prompt embeddings: must match the text path
        output = ldmad_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]
        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    """Slow GPU tests against the released ``Intel/ldm3d`` checkpoint."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        """Deterministic inputs with a fixed latent tensor."""
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        # NOTE(review): the depth slice is taken from `rgb` here, mirroring the
        # source (its expected values overlap with the rgb slice) — confirm intent.
        depth_slice = rgb[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    """Nightly GPU tests comparing full 50-step runs against reference statistics."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        """Deterministic inputs with a fixed latent tensor (full 50-step run)."""
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_4c(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
| 659 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class lowercase__ (TaskTemplate ):
    """Task template mapping dataset columns to the image-classification schema.

    NOTE(review): base class and field/method names restored from the imported
    ``TaskTemplate`` and the visible ``self.label_column`` / ``self.label_schema``
    reads; the obfuscated source reused one name for every field and method.
    """

    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's
        own ``ClassLabel`` feature for the label column.

        Raises ValueError when the label column is missing or not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(F"""Column {self.label_column} is not present in features.""")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # Frozen dataclass: bypass __setattr__ via the instance __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Mapping from the configured dataset columns to the canonical names."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 82 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
# Renames from original SAM checkpoint keys to HF `SamModel` parameter names.
# NOTE(review): renamed from `_lowercase` to match the lookup in replace_keys().
KEYS_TO_MODIFY_MAPPING = {
    '''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
    '''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
    '''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
    '''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
    '''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
    '''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
    '''mask_downscaling.0''': '''mask_embed.conv1''',
    '''mask_downscaling.1''': '''mask_embed.layer_norm1''',
    '''mask_downscaling.3''': '''mask_embed.conv2''',
    '''mask_downscaling.4''': '''mask_embed.layer_norm2''',
    '''mask_downscaling.6''': '''mask_embed.conv3''',
    '''point_embeddings''': '''point_embed''',
    '''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
    '''image_encoder''': '''vision_encoder''',
    '''neck.0''': '''neck.conv1''',
    '''neck.1''': '''neck.layer_norm1''',
    '''neck.2''': '''neck.conv2''',
    '''neck.3''': '''neck.layer_norm2''',
    '''patch_embed.proj''': '''patch_embed.projection''',
    '''.norm''': '''.layer_norm''',
    '''blocks''': '''layers''',
}


def replace_keys(state_dict):
    """Translate an original SAM state dict into HF `SamModel` naming.

    Drops the normalization buffers, applies the substring mapping above,
    renames the hypernetwork MLP layers, and duplicates the shared positional
    embedding under its second expected key. Mutates `state_dict` (pops) and
    returns a new dict.
    """
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            # The 3-layer MLP maps to proj_in / layers.0 / proj_out in HF.
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    """Download a SAM checkpoint, convert it to a HF `SamModel`, and sanity-check
    the IoU scores on a reference image (for the ViT-H checkpoint).

    NOTE(review): `pytorch_dump_folder` and `push_to_hub` are currently unused
    in this body, matching the source — confirm whether saving/pushing logic
    was intended here.
    """
    checkpoint_path = hf_hub_download(model_hub_id, F'''checkpoints/{model_name}.pth''')
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")
    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        # Reference IoU scores for the ViT-H checkpoint on the car image.
        assert scores[-1].item() == 0.579890251159668
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604
        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    # CLI: pick a SAM checkpoint and convert it to the HF format.
    parser = argparse.ArgumentParser()
    choices = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
    parser.add_argument(
        '''--model_name''',
        default='''sam_vit_h_4b8939''',
        choices=choices,
        type=str,
        help='''Path to hf config.json of model to convert''',
    )
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether to push the model and processor to the hub after converting''',
    )
    parser.add_argument(
        '''--model_hub_id''',
        default='''ybelkada/segment-anything''',
        choices=choices,
        type=str,
        help='''Path to hf config.json of model to convert''',
    )
    args = parser.parse_args()
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 659 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)  # module logger
# Map of pretrained CamemBERT checkpoints to their hosted config files.
# NOTE(review): rebinds `lowerCAmelCase__`, shadowing the logger above —
# presumably these had distinct names originally; verify before use.
lowerCAmelCase__ = {
    '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
    '''umberto-commoncrawl-cased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
    ),
    '''umberto-wikipedia-uncased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
    ),
}
class __snake_case ( PretrainedConfig):
    """Configuration for CamemBERT models (RoBERTa-style hyper-parameters).

    NOTE(review): the obfuscated source reused one parameter name for every
    argument (a SyntaxError) and discarded every value into a local variable;
    names restored from the upstream ``CamembertConfig`` and the imported
    ``PretrainedConfig`` base.
    """

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig ( OnnxConfig):
    """ONNX export configuration: declares the dynamic axes of the encoder inputs.

    NOTE(review): renamed from the obfuscated ``__snake_case`` (which shadowed
    the config class of the same name) and the property restored to ``inputs``
    so it actually overrides the imported ``OnnxConfig`` base property.
    """

    @property
    def inputs(self):
        # Multiple-choice tasks carry an extra per-choice axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ]
        )
| 83 |
class RadixNode:
    """A node of a radix tree (compressed trie).

    NOTE(review): renamed from the obfuscated ``__snake_case`` — the class
    body and the sibling functions below construct ``RadixNode(...)`` by this
    name; methods restored to the names used at their call sites
    (``match``/``insert``/``find``/``delete``/``print_tree``).
    """

    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Children keyed by the first character of their prefix.
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word.
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Return (common prefix, remaining node prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert every word of ``words`` into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert ``word`` into the subtree rooted at this node."""
        # Case 1: the word equals this node's prefix -> mark as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return True if ``word`` is stored in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        # If there is remaining prefix, the word can't be on the tree
        if remaining_prefix != "":
            return False
        # This applies when the word and the prefix are equal
        elif remaining_word == "":
            return incoming_node.is_leaf
        # We have word remaining so we check the next node
        else:
            return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Remove ``word`` from the tree; return True if it was present."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
        # If there is remaining prefix, the word can't be on the tree
        if remaining_prefix != "":
            return False
        # We have word remaining so we check the next node
        elif remaining_word != "":
            return incoming_node.delete(remaining_word)
        else:
            # If it is not a leaf, we don't have to delete
            if not incoming_node.is_leaf:
                return False
            else:
                # We delete the nodes if no edges go from it
                if len(incoming_node.nodes) == 0:
                    del self.nodes[word[0]]
                    # We merge the current node with its only child
                    if len(self.nodes) == 1 and not self.is_leaf:
                        merging_node = list(self.nodes.values())[0]
                        self.is_leaf = merging_node.is_leaf
                        self.prefix += merging_node.prefix
                        self.nodes = merging_node.nodes
                # If there is more than 1 edge, we just mark it as non-leaf
                elif len(incoming_node.nodes) > 1:
                    incoming_node.is_leaf = False
                # If there is 1 edge, we merge it with its child
                else:
                    merging_node = list(incoming_node.nodes.values())[0]
                    incoming_node.is_leaf = merging_node.is_leaf
                    incoming_node.prefix += merging_node.prefix
                    incoming_node.nodes = merging_node.nodes
                return True

    def print_tree(self, height: int = 0) -> None:
        """Pretty-print the subtree rooted at this node."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
def test_trie() -> bool:
    """Smoke-test insertion, lookup and deletion on a small word set.

    Bug fixed: the obfuscated original assigned the word list and root to
    throwaway locals while the asserts read `words`/`root`, and the
    function name no longer matched the `assert test_trie()` caller below.
    """
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
# NOTE(review): pytest-style wrapper that assumes a sibling `test_trie()`
# exists at module level; the obfuscation collapsed several function names
# in this file, so confirm the intended target before running.
def UpperCamelCase ( ):
    assert test_trie()
def main() -> None:
    """Build a demo radix tree from a fixed word list and pretty-print it.

    Bug fixed: the obfuscated original assigned the root and word list to
    throwaway locals while later lines read `root`/`words`, and the
    function name no longer matched the `main()` call in the entry guard.
    """
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
# Script entry point.
if __name__ == "__main__":
    main()
| 659 | 0 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
# Metric metadata strings consumed by the `datasets.Metric` subclass below.
# Bug fixed: the obfuscated original assigned all three strings to the same
# throwaway name `UpperCAmelCase`, while the decorator and `_info` read
# `_CITATION`, `_DESCRIPTION` and `_KWARGS_DESCRIPTION` (a NameError).
_CITATION = '''\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
'''

_DESCRIPTION = '''\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
'''

_KWARGS_DESCRIPTION = '''
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: "c" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric(\'mauve\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
    """MAUVE metric — wrapper around the official `mauve-text` implementation.

    NOTE(review): `datasets.Metric` subclasses hook into the framework via
    `_info`/`_compute`; the obfuscated original collapsed both method names
    into one placeholder, so the conventional hook names are restored here.
    """

    def _info(self):
        """Declare metric metadata and the expected string input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage='https://github.com/krishnap25/mauve',
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/krishnap25/mauve'],
            reference_urls=[
                'https://arxiv.org/abs/2102.01454',
                'https://github.com/krishnap25/mauve',
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        """Compute MAUVE between `predictions` and `references`.

        Returns the object produced by `compute_mauve` (per the module
        docstring: fields mauve, frontier_integral, divergence_curve,
        p_hist, q_hist).

        Bugs fixed: the original declared every parameter with the same
        placeholder name (a SyntaxError), assigned the result to a
        throwaway local, and then returned the undefined name `out`.
        Parameter names were recovered from the keyword mapping of the
        `compute_mauve` call.
        """
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 84 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """Builds a small `BlipTextConfig` plus dummy inputs for the TF BlipText tests.

    Bugs fixed: the obfuscated original assigned every constructor argument
    to a throwaway local instead of a `self.` attribute (so later methods
    reading `self.batch_size` etc. would fail), declared all parameters with
    one repeated placeholder name (a SyntaxError), and collapsed all method
    names into `UpperCAmelCase_` although the file calls them as
    `get_config` / `prepare_config_and_inputs` / `create_and_check_model`.
    The class name is restored from the `BlipTextModelTester(self)` call in
    the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask) with random contents."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # Force each row to a contiguous attended prefix: ones up to a
            # random cut point, zeros after it.
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        """Build a `BlipTextConfig` from the tester's hyper-parameters."""
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        """Run the model with and without an attention mask and check shapes."""
        model = TFBlipTextModel(config=config)
        # The first result is intentionally overwritten — both call
        # signatures are exercised. `training=False` assumed (the original
        # flag value was mangled — confirm against upstream).
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for TFModelTesterMixin: return (config, inputs_dict)."""
        config, input_ids, input_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class __snake_case ( TFModelTesterMixin , unittest.TestCase ):
    """unittest suite for `TFBlipTextModel`.

    Bugs fixed: the obfuscated original inherited from an undefined
    `snake_case__` (restored to the imported `TFModelTesterMixin`), passed an
    undefined name as `config_class` (restored to the imported
    `BlipTextConfig`), and collapsed all test-method names into
    `UpperCAmelCase_` so unittest would never discover them.

    NOTE(review): the mixin flag names and some test-method names below are
    restored from the conventional TFModelTesterMixin layout, not from
    anything visible in this file — confirm against the upstream test file.
    """

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds" )
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 659 | 0 |
def _a ( lowercase__ : int = 10_00 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = 2**power
SCREAMING_SNAKE_CASE__ : Optional[int] = str(lowercase__ )
SCREAMING_SNAKE_CASE__ : int = list(lowercase__ )
SCREAMING_SNAKE_CASE__ : Tuple = 0
for i in list_num:
sum_of_num += int(lowercase__ )
return sum_of_num
if __name__ == "__main__":
    # Interactive driver: read an exponent, show 2**power and its digit sum.
    # Bugs fixed: the obfuscated original assigned the parsed input and the
    # result to throwaway locals while the prints read `power`/`result`, and
    # it called an undefined `solution` — the only solver in this file is `_a`.
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = _a(power)
    print("Sum of the digits is: ", result)
| 85 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# Module-level tokenizer constants.
# Bug fixed: the obfuscated original assigned all four values to the same
# throwaway name `_lowercase`, while the tokenizer class below reads
# `logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` (a NameError at class creation).
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a dict mapping every byte value (0-255) to a printable unicode char.

    Printable bytes map to themselves; the remaining bytes are shifted into
    the 256+ codepoint range so that byte-level BPE has a visible, reversible
    representation for every byte.

    Bug fixed: the obfuscated original assigned the working lists to a
    throwaway local while the loop body read `bs`/`cs`/`n` (a NameError);
    the function name is restored from the `bytes_to_unicode()` call in the
    tokenizer's __init__.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a sequence of symbols).

    Bug fixed: the obfuscated original assigned the accumulator and previous
    symbol to throwaway locals while the loop read `pairs`/`prev_char`; the
    function name is restored from the `get_pairs(...)` calls in `bpe`.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __snake_case ( snake_case__ ):
    """Byte-level BPE tokenizer for LED (derived from the BART/GPT-2 tokenizer).

    NOTE(review): identifiers in this file were mechanically mangled — many
    assignments target throwaway names (``lowerCAmelCase_``) while later
    lines read the intended attribute (e.g. ``self.encoder``); several
    ``def``s share the name ``UpperCAmelCase_`` and some signatures repeat a
    single parameter name (a SyntaxError as written). The code below is kept
    byte-identical; only documentation was added.
    """
    # The four class attributes below all target the same mangled name, so
    # only the last assignment survives — upstream these are presumably
    # vocab_files_names / pretrained_vocab_files_map / max_model_input_sizes
    # / model_input_names (confirm against the un-obfuscated source).
    UpperCamelCase_ = VOCAB_FILES_NAMES
    UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ = ['input_ids', 'attention_mask']
    def __init__( self : int ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Any ,lowerCAmelCase__ : Tuple="replace" ,lowerCAmelCase__ : Optional[int]="<s>" ,lowerCAmelCase__ : Optional[int]="</s>" ,lowerCAmelCase__ : Tuple="</s>" ,lowerCAmelCase__ : int="<s>" ,lowerCAmelCase__ : Union[str, Any]="<unk>" ,lowerCAmelCase__ : str="<pad>" ,lowerCAmelCase__ : Tuple="<mask>" ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : Tuple ,) -> Any:
        """Wrap the special tokens as AddedToken and load vocab/merges files.

        NOTE(review): the parameter list repeats the mangled name; upstream
        the parameters are presumably vocab_file, merges_file, errors, the
        bos/eos/sep/cls/unk/pad/mask tokens and add_prefix_space — verify.
        """
        lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else bos_token
        lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else eos_token
        lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else sep_token
        lowerCAmelCase_ : Any = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else cls_token
        lowerCAmelCase_ : Tuple = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else unk_token
        lowerCAmelCase_ : Any = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase_ : Optional[int] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else mask_token
        super().__init__(
            errors=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
        # NOTE(review): the json.load result should populate self.encoder —
        # the mangled assignment targets a local, so the next line would
        # raise AttributeError at runtime.
        with open(lowerCAmelCase__ ,encoding="utf-8" ) as vocab_handle:
            lowerCAmelCase_ : List[str] = json.load(lowerCAmelCase__ )
        lowerCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
        lowerCAmelCase_ : Optional[int] = errors # how to handle errors in decoding
        lowerCAmelCase_ : Optional[int] = bytes_to_unicode()
        lowerCAmelCase_ : str = {v: k for k, v in self.byte_encoder.items()}
        with open(lowerCAmelCase__ ,encoding="utf-8" ) as merges_handle:
            lowerCAmelCase_ : List[str] = merges_handle.read().split("\n" )[1:-1]
        lowerCAmelCase_ : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
        lowerCAmelCase_ : Union[str, Any] = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
        lowerCAmelCase_ : Dict = {}
        lowerCAmelCase_ : List[str] = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        lowerCAmelCase_ : Any = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def UpperCAmelCase_ ( self : Dict ) -> Dict:
        """Number of entries in the base vocabulary (excludes added tokens)."""
        return len(self.encoder )
    def UpperCAmelCase_ ( self : Dict ) -> str:
        """Return the full vocab: base encoder merged with added tokens."""
        return dict(self.encoder ,**self.added_tokens_encoder )
    def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Dict ) -> Dict:
        """Apply byte-pair-encoding merges to a single token, with caching.

        Repeatedly merges the lowest-ranked adjacent pair until no ranked
        pair remains, then returns the space-joined subword string.
        """
        if token in self.cache:
            return self.cache[token]
        lowerCAmelCase_ : Union[str, Any] = tuple(lowerCAmelCase__ )
        lowerCAmelCase_ : str = get_pairs(lowerCAmelCase__ )
        if not pairs:
            return token
        while True:
            # Pick the adjacent pair with the lowest merge rank; unranked
            # pairs compare as +inf.
            lowerCAmelCase_ : Optional[int] = min(lowerCAmelCase__ ,key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ ,float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = bigram
            lowerCAmelCase_ : Tuple = []
            lowerCAmelCase_ : str = 0
            while i < len(lowerCAmelCase__ ):
                try:
                    lowerCAmelCase_ : Union[str, Any] = word.index(lowerCAmelCase__ ,lowerCAmelCase__ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowerCAmelCase_ : List[str] = j
                if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowerCAmelCase_ : Optional[int] = tuple(lowerCAmelCase__ )
            lowerCAmelCase_ : Tuple = new_word
            if len(lowerCAmelCase__ ) == 1:
                break
            else:
                lowerCAmelCase_ : Dict = get_pairs(lowerCAmelCase__ )
        lowerCAmelCase_ : Optional[Any] = " ".join(lowerCAmelCase__ )
        lowerCAmelCase_ : Optional[Any] = word
        return word
    def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Dict ) -> Optional[Any]:
        """Tokenize text: regex-split, byte-encode each piece, then BPE."""
        lowerCAmelCase_ : Any = []
        for token in re.findall(self.pat ,lowerCAmelCase__ ):
            lowerCAmelCase_ : Optional[int] = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(" " ) )
        return bpe_tokens
    def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ) -> Tuple:
        """Convert a token (str) to its vocab id, falling back to <unk>."""
        return self.encoder.get(lowerCAmelCase__ ,self.encoder.get(self.unk_token ) )
    def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
        """Convert a vocab id back to its token string."""
        return self.decoder.get(lowerCAmelCase__ )
    def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[Any] ) -> Any:
        """Join tokens and decode the byte-level representation back to text."""
        lowerCAmelCase_ : int = "".join(lowerCAmelCase__ )
        lowerCAmelCase_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" ,errors=self.errors )
        return text
    def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
        """Write vocab.json and merges.txt into the given directory.

        Returns the (vocab_file, merge_file) paths.
        """
        if not os.path.isdir(lowerCAmelCase__ ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        lowerCAmelCase_ : Optional[int] = os.path.join(
            lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        lowerCAmelCase_ : List[str] = os.path.join(
            lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCAmelCase__ ,ensure_ascii=lowerCAmelCase__ ) + "\n" )
        lowerCAmelCase_ : Dict = 0
        # NOTE(review): the sort key reads `kv[1]` but the lambda's parameter
        # was mangled to a different name — upstream this is presumably
        # `key=lambda kv: kv[1]` (sort merges by rank); verify.
        with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCAmelCase__ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    lowerCAmelCase_ : List[Any] = token_index
                writer.write(" ".join(lowerCAmelCase__ ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
        """Add special tokens: <s> A </s> (pair: <s> A </s></s> B </s>)."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCAmelCase_ : Union[str, Any] = [self.cls_token_id]
        lowerCAmelCase_ : str = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ,lowerCAmelCase__ : bool = False ) -> List[int]:
        """Return a 0/1 mask marking special-token positions in the sequence."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCAmelCase__ ,token_ids_a=lowerCAmelCase__ ,already_has_special_tokens=lowerCAmelCase__ )
        if token_ids_a is None:
            return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
        return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
    def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
        """Token-type ids: LED (like BART) uses all zeros."""
        lowerCAmelCase_ : Optional[int] = [self.sep_token_id]
        lowerCAmelCase_ : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : str ) -> Union[str, Any]:
        """Optionally prepend a space so the first word BPE-merges correctly."""
        lowerCAmelCase_ : Optional[int] = kwargs.pop("add_prefix_space" ,self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
            lowerCAmelCase_ : List[str] = " " + text
        return (text, kwargs)
    def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[bool] = None ,) -> dict:
        """Pad inputs as usual, then pad `global_attention_mask` to match.

        `-1` is used as the global-attention pad value because `0` already
        means "local attention" in that mask.
        """
        lowerCAmelCase_ : int = super()._pad(
            encoded_inputs=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding_strategy=lowerCAmelCase__ ,pad_to_multiple_of=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,)
        # Load from model defaults
        if return_attention_mask is None:
            lowerCAmelCase_ : List[Any] = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            lowerCAmelCase_ : Dict = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            lowerCAmelCase_ : List[Any] = len(encoded_inputs["global_attention_mask"] ) != len(lowerCAmelCase__ )
            if needs_to_be_padded:
                lowerCAmelCase_ : Union[str, Any] = len(lowerCAmelCase__ ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    lowerCAmelCase_ : Optional[int] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    lowerCAmelCase_ : List[Any] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
| 659 | 0 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
# NOTE(review): presumably the pretrained-config archive map
# (`ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP` upstream); the name was mangled
# to `__a` — verify before relying on it.
__a :Dict = {
    'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
    'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class _a ( PretrainedConfig ):
    """Configuration class for ERNIE-M models (model_type ``ernie_m``).

    Defaults mirror the ``susnato/ernie-m-base_pytorch`` checkpoint.

    Bugs fixed: the obfuscated original inherited from an undefined
    ``snake_case_`` (restored to the imported ``PretrainedConfig``), declared
    every ``__init__`` parameter with the same placeholder name (a
    SyntaxError), and assigned all values to a throwaway local instead of
    instance attributes. Parameter names were recovered from the assignment
    order and defaults; ``model_type``/``attribute_map`` follow the
    ``PretrainedConfig`` convention — confirm against the upstream file.
    """

    model_type = "ernie_m"
    # Maps legacy config keys onto canonical attribute names.
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
import os
# Roman symbol values used by both the parser and the generator below.
# Bug fixed: the obfuscated original bound this dict to `_lowercase` while
# the functions read `SYMBOLS`.
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman-numeral string to its integer value.

    A symbol smaller than its successor is subtracted (e.g. the C in CM);
    otherwise it is added. The final symbol is always added.

    Bug fixed: the obfuscated original assigned every intermediate to a
    throwaway local while the loop read `index`/`total_value` etc.; the
    function name is restored from its caller in `solution`.
    """
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Return the minimal Roman-numeral representation of `num`.

    Handles the subtractive forms (CM/CD, XC/XL, IX/IV) explicitly at each
    power of ten.

    Bug fixed: the obfuscated original assigned every count to a throwaway
    local while the arithmetic read `m_count`/`c_count`/`x_count`; the
    function name is restored from its caller in `solution`.
    """
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: total characters saved by minimising each numeral.

    Reads one Roman numeral per line from the data file next to this module
    and sums len(original) - len(minimal form) over all lines.

    Bugs fixed: the obfuscated original assigned every intermediate to a
    throwaway local while the loop read `lines`/`savings` etc., and the
    function name is restored from the `solution()` call in the entry guard.
    The dirname argument is assumed to be `__file__` (mangled in the
    original) — confirm against the upstream Euler script.
    """
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        parsed = parse_roman_numerals(original)
        shortened = generate_roman_numerals(parsed)
        savings += len(original) - len(shortened)
    return savings
# Script entry point.
if __name__ == "__main__":
    print(f"{solution() = }")
| 659 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
_lowerCamelCase : Any = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class UpperCamelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCAmelCase__ : List[str]) ->Any:
'''simple docstring'''
super().__init__()
A__ = torchvision.models.resnetaaa(pretrained=UpperCAmelCase__)
A__ = list(model.children())[:-2]
A__ = nn.Sequential(*UpperCAmelCase__)
A__ = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds])
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int]) ->int:
'''simple docstring'''
A__ = self.pool(self.model(UpperCAmelCase__))
A__ = torch.flatten(UpperCAmelCase__ , start_dim=2)
A__ = out.transpose(1 , 2).contiguous()
return out # BxNx2048
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int) ->Tuple:
'''simple docstring'''
A__ = [json.loads(UpperCAmelCase__) for l in open(UpperCAmelCase__)]
A__ = os.path.dirname(UpperCAmelCase__)
A__ = tokenizer
A__ = labels
A__ = len(UpperCAmelCase__)
A__ = max_seq_length
A__ = transforms
def __len__( self : Optional[int]) ->Any:
'''simple docstring'''
return len(self.data)
def __getitem__( self : int , UpperCAmelCase__ : Tuple) ->str:
'''simple docstring'''
A__ = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=UpperCAmelCase__))
A__ , A__ , A__ = sentence[0], sentence[1:-1], sentence[-1]
A__ = sentence[: self.max_seq_length]
A__ = torch.zeros(self.n_classes)
A__ = 1
A__ = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''])).convert('''RGB''')
A__ = self.transforms(UpperCAmelCase__)
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def SCREAMING_SNAKE_CASE ( self : List[str]) ->List[str]:
'''simple docstring'''
A__ = Counter()
for row in self.data:
label_freqs.update(row['''label'''])
return label_freqs
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[Any]:
"""simple docstring"""
A__ = [len(row['''sentence'''] ) for row in batch]
A__ , A__ = len(lowercase_ ), max(lowercase_ )
A__ = torch.zeros(lowercase_ , lowercase_ , dtype=torch.long )
A__ = torch.zeros(lowercase_ , lowercase_ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(lowercase_ , lowercase_ ) ):
A__ = input_row['''sentence''']
A__ = 1
A__ = torch.stack([row['''image'''] for row in batch] )
A__ = torch.stack([row['''label'''] for row in batch] )
A__ = torch.stack([row['''image_start_token'''] for row in batch] )
A__ = torch.stack([row['''image_end_token'''] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def SCREAMING_SNAKE_CASE ( ) -> str:
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
| 87 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCamelCase():
    """Entry point for the TensorFlow benchmark CLI.

    Parses `TensorFlowBenchmarkArguments` from the command line, builds the
    benchmark, and translates the argparse error raised for deprecated
    ``--no_*`` flags into a clearer message before running.
    """
    # Bug fix: the parser was built from the undefined name `snake_case__`,
    # and all the error-path locals below were undefined as well.
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval() on text extracted from the argparse error is
        # fragile/unsafe in general; kept because the input is the local
        # argparse message, not external data — confirm before widening use.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
    # Bug fix: this guard called the undefined name `main()`; the entry point
    # defined above is `UpperCamelCase`.
    UpperCamelCase()
| 659 | 0 |
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# NOTE(review): nothing in this script reads this constant; presumably it was
# meant to configure a TF logging environment variable — confirm upstream.
UpperCAmelCase = "3"

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

# Best-effort torch report: print None when torch is not installed.
try:
    import torch
except ImportError:
    torch = None

if torch is not None:
    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
else:
    print("Torch version:", None)

# Same best-effort report for transformers.
try:
    import transformers
except ImportError:
    transformers = None

if transformers is not None:
    print("transformers version:", transformers.__version__)
else:
    print("transformers version:", None)
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
# ROUGE test fixture: candidate summaries.
# NOTE(review): this first list is immediately shadowed by the reassignment
# below (both use the name `_lowercase`), so it is unreachable as written.
# Upstream these were two distinct constants (predictions and targets) —
# confirm before relying on either.
_lowercase = [
    '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
    ''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
    ''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
    '''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
    ''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
    ''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
    ''' body.''',
    '''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
    ''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
    ''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
    ''' punishment.''',
]
# ROUGE test fixture: reference summaries. This assignment shadows the list
# above (see NOTE).
_lowercase = [
    '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
    ''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
    ''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
    '''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
    ''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
    ''' Israelis .''',
    '''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
    ''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
    ''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
    ''' sentences up by 28% .''',
]
def UpperCamelCase ( ):
    # Intent: the mean rouge2 F-measure should be identical whether all keys
    # or only rouge2 are requested without bootstrap aggregation.
    # NOTE(review): `snake_case__`, `no_aggregation` and
    # `no_aggregation_just_ra` are undefined in this module (obfuscation
    # artifact); as written, every call below raises NameError.
    lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , bootstrap_aggregation=snake_case__ , rouge_keys=["rouge2", "rougeL"])
    assert isinstance(snake_case__ , snake_case__)
    lowerCAmelCase_ : str = calculate_rouge(snake_case__ , snake_case__ , bootstrap_aggregation=snake_case__ , rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"]).fmeasure.mean()
    )
def UpperCamelCase ( ):
    # Intent: rougeLsum should score higher with newline separation enabled.
    # NOTE(review): `snake_case__`, `k`, `score` and `score_no_sep` are
    # undefined here (obfuscation artifact); the calls raise NameError as
    # written.
    lowerCAmelCase_ : str = "rougeLsum"
    lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=[k])[k]
    lowerCAmelCase_ : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=[k])[k]
    assert score > score_no_sep
def UpperCamelCase ( ):
    # Intent: for rouge1/2/L, newline separation should not change scores.
    # NOTE(review): `snake_case__`, `score_sep` and `score_no_sep` are
    # undefined in this module (obfuscation artifact).
    lowerCAmelCase_ : int = ["rouge1", "rouge2", "rougeL"]
    lowerCAmelCase_ : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=snake_case__)
    lowerCAmelCase_ : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=snake_case__)
    assert score_sep == score_no_sep
def UpperCamelCase ( ):
    # Intent: inputs containing literal "\n" should score the same with and
    # without newline separation.
    # NOTE(review): `snake_case__` is undefined (obfuscation artifact); the
    # two local fixture lists below are built but never actually passed.
    lowerCAmelCase_ : List[str] = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    lowerCAmelCase_ : Dict = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__) == calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__)
def UpperCamelCase ( ):
    # Intent: the "<n>" sentence-separator handling should improve rougeLsum.
    # NOTE(review): `snake_case__`, `new_score` and `prev_score` are undefined
    # (obfuscation artifact); the fixtures below are built but never used.
    lowerCAmelCase_ : Optional[int] = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    lowerCAmelCase_ : Any = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , rouge_keys=["rougeLsum"] , newline_sep=snake_case__)["rougeLsum"]
    lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def UpperCamelCase ( ):
    # Intent: smoke-test calculate_rouge_path on the WMT en-ro sample files,
    # with and without bootstrap aggregation.
    # NOTE(review): `snake_case__` is undefined (obfuscation artifact), so the
    # isinstance checks cannot run as written.
    lowerCAmelCase_ : int = Path("examples/seq2seq/test_data/wmt_en_ro")
    lowerCAmelCase_ : Dict = calculate_rouge_path(data_dir.joinpath("test.source") , data_dir.joinpath("test.target"))
    assert isinstance(snake_case__ , snake_case__)
    lowerCAmelCase_ : Any = calculate_rouge_path(
        data_dir.joinpath("test.source") , data_dir.joinpath("test.target") , bootstrap_aggregation=snake_case__)
    assert isinstance(snake_case__ , snake_case__)
| 659 | 0 |
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# Bug fix: all five module constants below were assigned to one repeatedly
# shadowed name while the code reads them under their real names; the names
# are restored from those usages.
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
# Raw string so `\[`, `\]` and `\.` are regex escapes, not (invalid) string
# escapes.
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempted from the checkpoint-in-docstring check.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def UpperCamelCase_() -> None:
    """Verify every config class docstring links a valid HF Hub checkpoint.

    Scans the source of each class in ``CONFIG_MAPPING`` for markdown links of
    the form ``[name](https://huggingface.co/name)`` and raises ``ValueError``
    listing every config (outside the ignore set) with no self-consistent link.
    """
    # Bug fix: locals were collapsed to one shadowed name while being read as
    # `configs_without_checkpoint`, `name`, etc.; names restored from usage.
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
    # Bug fix: this guard called the undefined name
    # `check_config_docstrings_have_checkpoints()`; the checker defined above
    # is `UpperCamelCase_`.
    UpperCamelCase_()
| 89 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for LED, covering the slow and fast tokenizers.

    NOTE(review): the original base class was the undefined name `snake_case__`;
    `TokenizerTesterMixin` (imported above) is restored — it supplies
    ``tmpdirname`` and ``tokenizers_list`` used below. The original class
    attributes and methods all shared one obfuscated identifier and shadowed
    each other; distinct names are restored following the upstream
    tokenizer-test conventions required by the mixin — confirm against the
    original module.
    """

    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the mixin's temp dir."""
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        # These attributes are read by the helper methods below; the original
        # dropped the `self.` assignments.
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer loaded from the temp-dir vocab."""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast tokenizer loaded from the temp-dir vocab."""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"],
                padding=True,
                truncation=True,
                return_tensors="pt",
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            # Both encoder input and labels must start with <s> and end with </s>.
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        # Intentionally a no-op (overrides the mixin's version).
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 659 | 0 |
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def _snake_case ( A , A , A , A ) -> Dict:
lowerCAmelCase__ = sorted(zip(A , A ) , key=lambda A : x[0] / x[1] , reverse=A )
lowerCAmelCase__ , lowerCAmelCase__ = [i[0] for i in r], [i[1] for i in r]
lowerCAmelCase__ = list(accumulate(A ) )
lowerCAmelCase__ = bisect(A , A )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest

    doctest.testmod()
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Bug fix: the logger was previously assigned to `_lowercase` and then
# immediately shadowed by the archive-map dict below, leaving it unreachable.
logger = logging.get_logger(__name__)

# Map from model id to the URL of its hosted configuration file.
_lowercase = {
    '''Visual-Attention-Network/van-base''': (
        '''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
    ),
}
class __snake_case(PretrainedConfig):
    """Configuration for the VAN (Visual Attention Network) model.

    NOTE(review): the base class was the undefined name `snake_case__`;
    `PretrainedConfig` (imported above) is restored. The original `__init__`
    declared all thirteen parameters under one duplicated name (a
    SyntaxError); parameter names are restored from the attribute assignments
    in the body. List defaults are kept for API compatibility and must be
    treated as read-only.
    """

    # Registration key expected by PretrainedConfig machinery.
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 659 | 0 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for RoBERTa, covering the slow and fast tokenizers.

    NOTE(review): the original base class referenced `_lowercase`, which is
    not defined in this module; `TokenizerTesterMixin` (imported above) is
    restored — it supplies ``tmpdirname`` and ``tokenizers_list`` used below.
    Class attributes and methods all shared colliding obfuscated names;
    distinct names are restored per upstream tokenizer-test conventions —
    confirm against the original module.
    """

    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the mixin's temp dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        # These attributes are read by the helper methods below; the original
        # dropped the `self.` assignments.
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()
        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        # Intentionally a no-op (overrides the mixin's version).
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
from math import factorial
def UpperCamelCase ( snake_case__ , snake_case__):
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError("Please enter positive integers for n and k where n >= k")
return factorial(snake_case__) // (factorial(snake_case__) * factorial(n - k))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'''If a class of 40 students must be arranged into groups of''',
f"4 for group projects, there are {combinations(40, 4)} ways",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f"are {combinations(10, 3)} ways that first, second and",
'''third place can be awarded.''',
)
| 659 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# NOTE(review): the logger, the fairseq->HF key MAPPING dict, and the
# TOP_LEVEL_KEYS list are all bound to the SAME name `UpperCamelCase_`, so each
# assignment clobbers the previous one; the functions below reference `logger`,
# `MAPPING` and `TOP_LEVEL_KEYS`, which are never bound — confirm intended names.
UpperCamelCase_ = logging.get_logger(__name__)
# Mapping from fairseq wav2vec2-conformer parameter names to HF model names.
# A "*" is later substituted with the encoder layer index.
UpperCamelCase_ = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
    """self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
    """self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
    """self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
    """self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
    """self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
    """self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
    """self_attn.rotary_emb""": """encoder.embed_positions""",
    """self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
    """conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
    """conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
    """conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
    """conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
    """conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
    """ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
    """ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
    """ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
    """ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
    """ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
    """ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
}
# Keys that live at the top level of the HF model (no "wav2vec2_conformer." prefix).
UpperCamelCase_ = [
    """lm_head""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
]
def _lowerCAmelCase ( __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> str:
for attribute in key.split('''.''' ):
lowercase : Tuple =getattr(__magic_name__ , __magic_name__ )
if weight_type is not None:
lowercase : Optional[int] =getattr(__magic_name__ , __magic_name__ ).shape
else:
lowercase : List[Any] =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
lowercase : Any =value
elif weight_type == "weight_g":
lowercase : List[Any] =value
elif weight_type == "weight_v":
lowercase : Union[str, Any] =value
elif weight_type == "bias":
lowercase : Tuple =value
elif weight_type == "running_mean":
lowercase : Union[str, Any] =value
elif weight_type == "running_var":
lowercase : str =value
elif weight_type == "num_batches_tracked":
lowercase : Tuple =value
elif weight_type == "inv_freq":
lowercase : Optional[Any] =value
else:
lowercase : Tuple =value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
# Map every tensor in the fairseq state dict onto the HF model.
# NOTE(review): this block is obfuscation-damaged and cannot run as written —
# the signature repeats `__magic_name__` (a SyntaxError), every local is bound
# to the throwaway name `lowercase`, and the call-site argument order for
# `load_conv_layer`/`set_recursively` has been erased. The body clearly intends
# locals `unused_weights`, `fairseq_dict`, `feature_extractor`, `is_used`,
# `mapped_key`, `layer_index`, `weight_type` — confirm against upstream before
# restructuring, so this review only annotates.
def _lowerCAmelCase ( __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ) -> Union[str, Any]:
    lowercase : Optional[int] =[]
    lowercase : Tuple =fairseq_model.state_dict()
    # feature extractor weights are handled separately via load_conv_layer
    lowercase : List[Any] =hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        lowercase : Tuple =False
        if "conv_layers" in name:
            load_conv_layer(
                __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , hf_model.config.feat_extract_norm == '''group''' , )
            lowercase : List[Any] =True
        else:
            for key, mapped_key in MAPPING.items():
                # top-level keys keep their name; everything else is nested
                lowercase : Optional[int] ='''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    lowercase : Union[str, Any] =True
                    if "*" in mapped_key:
                        # substitute the encoder layer index for the wildcard
                        lowercase : Optional[int] =name.split(__magic_name__ )[0].split('''.''' )[-2]
                        lowercase : List[str] =mapped_key.replace('''*''' , __magic_name__ )
                    # Decide which leaf attribute of the HF module receives the value.
                    if "pos_bias_u" in name:
                        lowercase : Optional[Any] =None
                    elif "pos_bias_v" in name:
                        lowercase : Union[str, Any] =None
                    elif "weight_g" in name:
                        lowercase : Any ='''weight_g'''
                    elif "weight_v" in name:
                        lowercase : Tuple ='''weight_v'''
                    elif "bias" in name:
                        lowercase : Optional[int] ='''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        lowercase : Optional[int] ='''weight'''
                    elif "running_mean" in name:
                        lowercase : Union[str, Any] ='''running_mean'''
                    elif "inv_freq" in name:
                        lowercase : Any ='''inv_freq'''
                    elif "running_var" in name:
                        lowercase : Tuple ='''running_var'''
                    elif "num_batches_tracked" in name:
                        lowercase : Dict ='''num_batches_tracked'''
                    else:
                        lowercase : str =None
                    set_recursively(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
                continue
        if not is_used:
            unused_weights.append(__magic_name__ )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def _lowerCAmelCase ( __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ) -> int:
lowercase : Optional[Any] =full_name.split('''conv_layers.''' )[-1]
lowercase : Any =name.split('''.''' )
lowercase : List[str] =int(items[0] )
lowercase : Union[str, Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
lowercase : Union[str, Any] =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
lowercase : Optional[Any] =value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
lowercase : Optional[int] =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
lowercase : str =value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__magic_name__ )
@torch.no_grad()
# Convert a fairseq wav2vec2-conformer checkpoint to a HF checkpoint on disk.
# NOTE(review): obfuscation-damaged — the signature repeats `__magic_name__`
# (SyntaxError) and locals are bound to the throwaway name `lowercase`, while
# the body reads `config`, `target_dict`, `processor`, `model`, `hf_wavavec`,
# `checkpoint_path`, `dict_path`, `is_finetuned`, none of which are bound as
# written. Annotating only; confirm parameter order against the upstream
# conversion script before repairing.
def _lowerCAmelCase ( __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Union[str, Any]=True ) -> Union[str, Any]:
    if config_path is not None:
        lowercase : Optional[Any] =WavaVecaConformerConfig.from_pretrained(__magic_name__ , hidden_act='''swish''' )
    else:
        lowercase : Optional[int] =WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        # rotary position embeddings variant
        lowercase : Dict ='''rotary'''
    if is_finetuned:
        if dict_path:
            lowercase : Optional[Any] =Dictionary.load(__magic_name__ )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            lowercase : str =target_dict.pad_index
            lowercase : Union[str, Any] =target_dict.bos_index
            lowercase : Any =target_dict.eos_index
            lowercase : Tuple =len(target_dict.symbols )
            lowercase : str =os.path.join(__magic_name__ , '''vocab.json''' )
            if not os.path.isdir(__magic_name__ ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__magic_name__ ) )
                return
            os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
            lowercase : Dict =target_dict.indices
            # fairseq has the <pad> and <s> switched
            lowercase : str =0
            lowercase : List[Any] =1
            with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(__magic_name__ , __magic_name__ )
            lowercase : List[str] =WavaVecaCTCTokenizer(
                __magic_name__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__magic_name__ , )
            lowercase : Optional[int] =True if config.feat_extract_norm == '''layer''' else False
            lowercase : str =WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__magic_name__ , return_attention_mask=__magic_name__ , )
            lowercase : Tuple =WavaVecaProcessor(feature_extractor=__magic_name__ , tokenizer=__magic_name__ )
            processor.save_pretrained(__magic_name__ )
        lowercase : str =WavaVecaConformerForCTC(__magic_name__ )
    else:
        lowercase : Tuple =WavaVecaConformerForPreTraining(__magic_name__ )
    # Load the fairseq model (finetuned checkpoints carry the dict next to them).
    if is_finetuned:
        lowercase , lowercase , lowercase : Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        lowercase : Dict =argparse.Namespace(task='''audio_pretraining''' )
        lowercase : Optional[int] =fairseq.tasks.setup_task(__magic_name__ )
        lowercase , lowercase , lowercase : str =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__magic_name__ )
    lowercase : List[Any] =model[0].eval()
    recursively_load_weights(__magic_name__ , __magic_name__ , not is_finetuned )
    hf_wavavec.save_pretrained(__magic_name__ )
# CLI entry point for the conversion script.
# NOTE(review): the parser and parsed args are bound to `UpperCamelCase_`, yet
# the code reads `parser`/`args` and calls `convert_wavaveca_conformer_checkpoint`,
# none of which are bound in this module as written — confirm intended names.
if __name__ == "__main__":
    UpperCamelCase_ = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    UpperCamelCase_ = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 92 |
import argparse
import json
from tqdm import tqdm
def UpperCamelCase():
    """Parse a raw DPR training file into a RAG evaluation set and gold file.

    Reads ``--src_path`` (DPR biencoder JSON), writes one question per line to
    ``--evaluation_set`` and the tab-joined positive-context titles per line to
    ``--gold_data_path``.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()
    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w") as gold_file:
        # (was `json.load(snake_case__)` on an undefined name — load the source file)
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            titles = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(titles) + "\n")


# Backward-compatible alias: the `__main__` guard below calls `main()`.
main = UpperCamelCase
# Script entry point.
# NOTE(review): `main` is never bound in this module as written — the parsing
# routine above is named `UpperCamelCase`; confirm the intended binding.
if __name__ == "__main__":
    main()
| 659 | 0 |
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
# Tests for FeaturesManager.determine_framework (PyTorch vs TensorFlow choice).
# NOTE(review): every method below is named `snake_case`, so later definitions
# shadow earlier ones and only the last survives on the class; likewise the
# first method assigns locals (`lowerCAmelCase__`) rather than `self.test_model`
# / `self.framework_pt` / `self.framework_tf`, which later methods read.
# Confirm intended method/attribute names before running.
class _lowerCAmelCase ( a ):
    """simple docstring"""

    # setUp-like fixture: small model id plus the two framework tags.
    def snake_case ( self ):
        '''simple docstring'''
        lowerCAmelCase__ :Tuple = SMALL_MODEL_IDENTIFIER
        lowerCAmelCase__ :List[Any] = 'pt'
        lowerCAmelCase__ :Optional[Any] = 'tf'
    # Write a PyTorch checkpoint for `self.test_model` into a directory.
    def snake_case ( self , __UpperCAmelCase ):
        '''simple docstring'''
        lowerCAmelCase__ :Optional[Any] = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(__UpperCAmelCase )
    # Write a TensorFlow checkpoint (converted from PT) into a directory.
    def snake_case ( self , __UpperCAmelCase ):
        '''simple docstring'''
        lowerCAmelCase__ :Dict = TFAutoModel.from_pretrained(self.test_model , from_pt=__UpperCAmelCase )
        model_tf.save_pretrained(__UpperCAmelCase )
    # determine_framework must honour an explicitly provided framework.
    def snake_case ( self ):
        '''simple docstring'''
        lowerCAmelCase__ :Tuple = 'mock_framework'
        # Framework provided - return whatever the user provides
        lowerCAmelCase__ :Dict = FeaturesManager.determine_framework(self.test_model , __UpperCAmelCase )
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(__UpperCAmelCase )
            lowerCAmelCase__ :int = FeaturesManager.determine_framework(__UpperCAmelCase , __UpperCAmelCase )
            self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(__UpperCAmelCase )
            lowerCAmelCase__ :List[Any] = FeaturesManager.determine_framework(__UpperCAmelCase , __UpperCAmelCase )
            self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
    # Without an explicit framework, infer it from the local checkpoint type.
    def snake_case ( self ):
        '''simple docstring'''
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(__UpperCAmelCase )
            lowerCAmelCase__ :List[Any] = FeaturesManager.determine_framework(__UpperCAmelCase )
            self.assertEqual(__UpperCAmelCase , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(__UpperCAmelCase )
            lowerCAmelCase__ :List[Any] = FeaturesManager.determine_framework(__UpperCAmelCase )
            self.assertEqual(__UpperCAmelCase , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(__UpperCAmelCase ):
                lowerCAmelCase__ :Union[str, Any] = FeaturesManager.determine_framework(__UpperCAmelCase )
    # For hub model ids, fall back to whichever framework is importable.
    def snake_case ( self ):
        '''simple docstring'''
        lowerCAmelCase__ :Dict = MagicMock(return_value=__UpperCAmelCase )
        with patch('transformers.onnx.features.is_tf_available' , __UpperCAmelCase ):
            lowerCAmelCase__ :List[Any] = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(__UpperCAmelCase , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        lowerCAmelCase__ :List[Any] = MagicMock(return_value=__UpperCAmelCase )
        with patch('transformers.onnx.features.is_torch_available' , __UpperCAmelCase ):
            lowerCAmelCase__ :Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(__UpperCAmelCase , self.framework_tf )
        # Both in environment -> use PyTorch
        lowerCAmelCase__ :Any = MagicMock(return_value=__UpperCAmelCase )
        lowerCAmelCase__ :int = MagicMock(return_value=__UpperCAmelCase )
        with patch('transformers.onnx.features.is_tf_available' , __UpperCAmelCase ), patch(
            'transformers.onnx.features.is_torch_available' , __UpperCAmelCase ):
            lowerCAmelCase__ :str = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(__UpperCAmelCase , self.framework_pt )
        # Both not in environment -> raise error
        lowerCAmelCase__ :Optional[Any] = MagicMock(return_value=__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = MagicMock(return_value=__UpperCAmelCase )
        with patch('transformers.onnx.features.is_tf_available' , __UpperCAmelCase ), patch(
            'transformers.onnx.features.is_torch_available' , __UpperCAmelCase ):
            with self.assertRaises(__UpperCAmelCase ):
                lowerCAmelCase__ :Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
| 93 |
from collections.abc import Sequence
def UpperCamelCase(nums=None):
    """Return the maximum sum over all (possibly non-contiguous) subsequences
    of `nums` containing at least one element.

    Raises:
        ValueError: if `nums` is None or empty.
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Either keep the best so far, extend it with `num`, or start over at `num`.
        ans = max(ans, ans + num, num)
    return ans


# Backward-compatible alias: the demo below refers to `max_subsequence_sum`,
# which was otherwise unbound in this module.
max_subsequence_sum = UpperCamelCase
# Script entry: run doctests, then compute the result on user-supplied numbers.
# NOTE(review): both inputs are bound to `_lowercase`, yet the code reads `n`,
# `array` and calls `max_subsequence_sum`, none of which are bound in this
# module as written — confirm the intended names.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Try on a sample input from the user
    _lowercase = int(input('''Enter number of elements : ''').strip())
    _lowercase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
    print(max_subsequence_sum(array))
| 659 | 0 |
'''simple docstring'''
import os
from collections.abc import Iterator
def lowercase_(top_dir: str = ".") -> Iterator[str]:
    """Yield every ``.py``/``.ipynb`` file under `top_dir`, skipping
    ``__init__.py``, the ``scripts`` directory, and hidden/underscore dirs.
    """
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune in place so os.walk does not descend into excluded directories.
        # (The original bound this comprehension to a throwaway local, which
        # left the walk unpruned.)
        dir_names[:] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            # (was `os.path.splitext(__A)` — the extension check must apply to
            # the file name, not the top directory argument)
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip('''./''')


# Backward-compatible alias used by the directory-listing code below.
good_file_paths = lowercase_
def lowercase_(i: int) -> str:
    """Return the markdown prefix for nesting depth `i`: a bulleted item at
    depth > 0, or a second-level heading at depth 0.
    """
    # (The body referenced `i` while the parameter was named `__A`, and the
    # return annotation said `int` for a string result.)
    return f'''{i * " "}*''' if i else "\n##"


# Backward-compatible alias used by the directory-listing code below.
md_prefix = lowercase_
def lowercase_(old_path: str, new_path: str) -> str:
    """Print markdown headers for each new path component of `new_path` that
    differs from `old_path`, then return `new_path` as the new "current" path.
    """
    # (Locals had been collapsed to throwaway names; `old_parts` is the
    # previous path split on the OS separator.)
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        # Emit a header only for components that are new relative to old_path.
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f'''{md_prefix(i)} {new_part.replace("_" , " " ).title()}''')
    return new_path


# Backward-compatible alias used by the directory-listing code below.
print_path = lowercase_
def lowercase_(top_dir: str = ".") -> None:
    """Print a markdown directory index (headers per directory, a link per
    file) for every good file under `top_dir`.
    """
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        # Nesting depth is the number of separators plus one for non-root files.
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        # (The original contained a "(unknown)" placeholder where the file name
        # belonged in the URL.)
        url = f"{filepath}/{filename}".replace(''' ''', '''%20''')
        filename = os.path.splitext(filename.replace('''_''', ''' ''').title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


# Backward-compatible alias: the `__main__` guard below calls this name.
print_directory_md = lowercase_
# Script entry point: print the markdown index for the current directory.
# NOTE(review): `print_directory_md` is never bound in this module as written —
# every helper above is named `lowercase_`; confirm the intended binding.
if __name__ == "__main__":
    print_directory_md('.')
| 94 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
# Lazy-import structure for the (deprecated) TAPEX tokenizer module.
# NOTE(review): the structure dict is bound to `_lowercase`, but the
# _LazyModule call below reads `_import_structure`, which is never bound —
# confirm the intended name.
_lowercase = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
    # Static type checkers see the real import.
    from .tokenization_tapex import TapexTokenizer
else:
    # At runtime, defer the import until an attribute is actually accessed.
    import sys
    _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 659 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
# NOTE(review): the logger and the CPU device are both bound to the same name
# `lowerCamelCase_`, so the second assignment clobbers the first — confirm the
# intended names (`logger`, `device`).
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = torch.device('''cpu''')
def snake_case():
    """Fetch the standard COCO validation image used to smoke-test vision
    model conversions, as a PIL image.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # (was `requests.get(A__, stream=A__)` on undefined names)
    im = Image.open(requests.get(url, stream=True).raw)
    return im


# Backward-compatible alias: the conversion routine below calls `prepare_img()`.
prepare_img = snake_case
def snake_case(swiftformer_name):
    """Return the expected first five logits for a given SwiftFormer variant,
    used to validate a converted checkpoint. Returns None for unknown names.
    """
    # (The parameter was named `A__` while the body compared `swiftformer_name`.)
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


# Backward-compatible alias used by the conversion routine below.
get_expected_output = snake_case
def snake_case(dct, old, new):
    """Rename an entry of `dct` in place: remove key `old` and store its value
    under key `new`.
    """
    # (The original signature repeated `A__` and dropped the popped value into
    # a throwaway local instead of storing it under the new key.)
    val = dct.pop(old)
    dct[new] = val


# Backward-compatible alias used by the conversion routine below.
rename_key = snake_case
def snake_case(state_dict):
    """Build the list of (old_key, new_key) pairs mapping original SwiftFormer
    state-dict names onto the HF SwiftFormer naming scheme.
    """
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        # Conv-module renames.
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                # network.<stage>.<block>... -> encoder.network.<stage>.blocks.<block>...
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


# Backward-compatible alias used by the conversion routine below.
create_rename_keys = snake_case
@torch.no_grad()
# Convert an original SwiftFormer checkpoint to a HF checkpoint on disk,
# validating the logits against known expected values.
# NOTE(review): obfuscation-damaged — the signature repeats `A__` (SyntaxError)
# and locals are bound to the throwaway name `UpperCAmelCase_`, while the body
# reads `swiftformer_name`, `original_ckpt`, `idalabel`, `checkpoint`,
# `rename_keys`, `hf_model`, `inputs`, `hf_logits`, `pytorch_dump_folder_path`,
# none of which are bound as written. Annotating only; confirm parameter order
# against the upstream conversion script before repairing.
def snake_case ( A__ ,A__ ,A__ ):
    UpperCAmelCase_ : Optional[int] = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    UpperCAmelCase_ : Optional[Any] = 10_00
    UpperCAmelCase_ : str = "huggingface/label-files"
    UpperCAmelCase_ : str = "imagenet-1k-id2label.json"
    UpperCAmelCase_ : List[str] = json.load(open(hf_hub_download(A__ ,A__ ,repo_type="dataset" ) ,"r" ) )
    UpperCAmelCase_ : Tuple = {int(A__ ): v for k, v in idalabel.items()}
    UpperCAmelCase_ : List[Any] = idalabel
    UpperCAmelCase_ : Optional[Any] = {v: k for k, v in idalabel.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        UpperCAmelCase_ : Tuple = [3, 3, 6, 4]
        UpperCAmelCase_ : str = [48, 56, 1_12, 2_20]
    elif swiftformer_name == "swiftformer_s":
        UpperCAmelCase_ : Optional[Any] = [3, 3, 9, 6]
        UpperCAmelCase_ : Optional[Any] = [48, 64, 1_68, 2_24]
    elif swiftformer_name == "swiftformer_l1":
        UpperCAmelCase_ : int = [4, 3, 10, 5]
        UpperCAmelCase_ : Union[str, Any] = [48, 96, 1_92, 3_84]
    elif swiftformer_name == "swiftformer_l3":
        UpperCAmelCase_ : Dict = [4, 4, 12, 6]
        UpperCAmelCase_ : Optional[int] = [64, 1_28, 3_20, 5_12]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https" ):
            UpperCAmelCase_ : List[Any] = torch.hub.load_state_dict_from_url(A__ ,map_location="cpu" ,check_hash=A__ )
        else:
            UpperCAmelCase_ : Any = torch.load(A__ ,map_location="cpu" )
        UpperCAmelCase_ : List[str] = checkpoint
    UpperCAmelCase_ : Dict = create_rename_keys(A__ )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(A__ ,A__ ,A__ )
    # load HuggingFace model
    UpperCAmelCase_ : Optional[int] = SwiftFormerForImageClassification(A__ ).eval()
    hf_model.load_state_dict(A__ )
    # prepare test inputs
    UpperCAmelCase_ : Tuple = prepare_img()
    UpperCAmelCase_ : int = ViTImageProcessor.from_pretrained("preprocessor_config" )
    UpperCAmelCase_ : int = processor(images=A__ ,return_tensors="pt" )
    # compare outputs from both models
    UpperCAmelCase_ : List[Any] = get_expected_output(A__ )
    UpperCAmelCase_ : int = hf_model(inputs["pixel_values"] ).logits
    assert hf_logits.shape == torch.Size([1, 10_00] )
    assert torch.allclose(hf_logits[0, 0:5] ,A__ ,atol=1e-3 )
    Path(A__ ).mkdir(exist_ok=A__ )
    print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
    hf_model.save_pretrained(A__ )
# CLI entry point for the SwiftFormer conversion script.
# NOTE(review): the parser and parsed args are bound to `lowerCamelCase_`, yet
# the code reads `parser`/`args` and calls `convert_swiftformer_checkpoint`,
# none of which are bound in this module as written — confirm intended names.
if __name__ == "__main__":
    lowerCamelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--swiftformer_name''',
        default='''swiftformer_xs''',
        choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
        type=str,
        help='''Name of the SwiftFormer model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''./converted_outputs/''',
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    lowerCamelCase_ = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 95 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# Paths used by the copy-consistency checker.
# NOTE(review): DIFFUSERS_PATH, REPO_PATH, the module spec and the loaded
# module are ALL bound to `_lowercase`, so each assignment clobbers the last;
# the code below reads `DIFFUSERS_PATH` and `spec`, which are never bound —
# confirm the intended names.
_lowercase = '''src/diffusers'''
_lowercase = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
_lowercase = importlib.util.spec_from_file_location(
    '''diffusers''',
    os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
    submodule_search_locations=[DIFFUSERS_PATH],
)
_lowercase = spec.loader.load_module()
def UpperCamelCase ( snake_case__ , snake_case__):
return line.startswith(snake_case__) or len(snake_case__) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , snake_case__) is not None
# Locate the source text of `object_name` (e.g. "models.unet_2d.UNet2DModel")
# inside the diffusers package and return it as a string.
# NOTE(review): obfuscation-damaged — locals are bound to the throwaway name
# `lowerCAmelCase_`, while the body reads `parts`, `i`, `module`, `lines`,
# `indent`, `line_index`, `start_index`, and the single parameter
# `snake_case__` stands in for both `object_name` and `DIFFUSERS_PATH` at
# different call sites. Annotating only; confirm against upstream before
# repairing.
def UpperCamelCase ( snake_case__):
    lowerCAmelCase_ : Tuple = object_name.split(".")
    lowerCAmelCase_ : Union[str, Any] = 0
    # First let's find the module where our object lives.
    lowerCAmelCase_ : Union[str, Any] = parts[i]
    while i < len(snake_case__) and not os.path.isfile(os.path.join(snake_case__ , F'''{module}.py''')):
        i += 1
        if i < len(snake_case__):
            lowerCAmelCase_ : Dict = os.path.join(snake_case__ , parts[i])
    if i >= len(snake_case__):
        raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''')
    with open(os.path.join(snake_case__ , F'''{module}.py''') , "r" , encoding="utf-8" , newline="\n") as f:
        lowerCAmelCase_ : Optional[Any] = f.readlines()
    # Now let's find the class / func in the code!
    lowerCAmelCase_ : Union[str, Any] = ""
    lowerCAmelCase_ : int = 0
    for name in parts[i + 1 :]:
        # Scan forward for a class/def with the current name at the current indent.
        while (
            line_index < len(snake_case__) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index]) is None
        ):
            line_index += 1
        indent += " "
        line_index += 1
    if line_index >= len(snake_case__):
        raise ValueError(F''' {object_name} does not match any function or class in {module}.''')
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    lowerCAmelCase_ : Union[str, Any] = line_index
    while line_index < len(snake_case__) and _should_continue(lines[line_index] , snake_case__):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    lowerCAmelCase_ : List[str] = lines[start_index:line_index]
    return "".join(snake_case__)
# Regexes for "# Copied from diffusers.<path> [with a->b ...]" annotations.
# NOTE(review): all three compiled patterns are bound to the SAME name
# `_lowercase`, but the code below reads `_re_copy_warning`,
# `_re_replace_pattern` and `_re_fill_pattern` — confirm the intended names.
_lowercase = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_lowercase = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_lowercase = re.compile(r'''<FILL\s+[^>]*>''')
def UpperCamelCase(code):
    """Return the leading whitespace of the first non-empty line of `code`,
    or the empty string if `code` has no non-empty line.
    """
    # (Locals had been collapsed to throwaway names; `lines`/`idx` restored.)
    lines = code.split("\n")
    idx = 0
    # Skip leading empty lines.
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


# Backward-compatible alias used by the blackify/consistency code below.
get_indent = UpperCamelCase
# Reformat a code snippet with black (wrapping indented snippets in a dummy
# class so black accepts them), then restyle its docstrings.
# NOTE(review): obfuscation-damaged — locals are bound to `lowerCAmelCase_`
# while the body reads `has_indent`, `code`, `result`; `black.TargetVersion.PYaa`
# looks like a mangled version constant (presumably PY37 — confirm upstream);
# and `snake_case__` stands in for several distinct arguments. Annotating only.
def UpperCamelCase ( snake_case__):
    lowerCAmelCase_ : Dict = len(get_indent(snake_case__)) > 0
    if has_indent:
        lowerCAmelCase_ : Dict = F'''class Bla:\n{code}'''
    lowerCAmelCase_ : Optional[int] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=snake_case__)
    lowerCAmelCase_ : Optional[Any] = black.format_str(snake_case__ , mode=snake_case__)
    lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = style_docstrings_in_code(snake_case__)
    # Strip the dummy class wrapper again if one was added.
    return result[len("class Bla:\n") :] if has_indent else result
# Check (and optionally fix, when overwrite=True) that every "# Copied from"
# block in `filename` matches its source in diffusers; return the list of
# [object_name, start_index] diffs found.
# NOTE(review): obfuscation-damaged — locals are bound to `lowerCAmelCase_`
# while the body reads `lines`, `diffs`, `line_index`, `search`, `object_name`,
# `replace_pattern`, `theoretical_code`, `theoretical_indent`, `start_index`,
# `indent`, `should_continue`, `observed_code`, `patterns`, `obja`/`objb`,
# `option`, none of which are bound as written; `snake_case__` stands in for
# many distinct arguments. Annotating only; confirm against upstream.
def UpperCamelCase ( snake_case__ , snake_case__=False):
    with open(snake_case__ , "r" , encoding="utf-8" , newline="\n") as f:
        lowerCAmelCase_ : Tuple = f.readlines()
    lowerCAmelCase_ : Tuple = []
    lowerCAmelCase_ : Union[str, Any] = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(snake_case__):
        lowerCAmelCase_ : Optional[int] = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = search.groups()
        lowerCAmelCase_ : int = find_code_in_diffusers(snake_case__)
        lowerCAmelCase_ : Dict = get_indent(snake_case__)
        lowerCAmelCase_ : Union[str, Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
        lowerCAmelCase_ : str = theoretical_indent
        lowerCAmelCase_ : Union[str, Any] = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        lowerCAmelCase_ : Optional[int] = True
        while line_index < len(snake_case__) and should_continue:
            line_index += 1
            if line_index >= len(snake_case__):
                break
            lowerCAmelCase_ : Dict = lines[line_index]
            lowerCAmelCase_ : List[str] = _should_continue(snake_case__ , snake_case__) and re.search(F'''^{indent}# End copy''' , snake_case__) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        lowerCAmelCase_ : Dict = lines[start_index:line_index]
        lowerCAmelCase_ : Optional[int] = "".join(snake_case__)
        # Remove any nested `Copied from` comments to avoid circular copies
        lowerCAmelCase_ : List[Any] = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(snake_case__) is None]
        lowerCAmelCase_ : Optional[Any] = "\n".join(snake_case__)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(snake_case__) > 0:
            lowerCAmelCase_ : List[str] = replace_pattern.replace("with" , "").split(",")
            lowerCAmelCase_ : Tuple = [_re_replace_pattern.search(snake_case__) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = pattern.groups()
                lowerCAmelCase_ : int = re.sub(snake_case__ , snake_case__ , snake_case__)
                if option.strip() == "all-casing":
                    lowerCAmelCase_ : List[str] = re.sub(obja.lower() , obja.lower() , snake_case__)
                    lowerCAmelCase_ : int = re.sub(obja.upper() , obja.upper() , snake_case__)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            lowerCAmelCase_ : List[Any] = blackify(lines[start_index - 1] + theoretical_code)
            lowerCAmelCase_ : Union[str, Any] = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lowerCAmelCase_ : List[Any] = lines[:start_index] + [theoretical_code] + lines[line_index:]
                lowerCAmelCase_ : Union[str, Any] = start_index + 1
    if overwrite and len(snake_case__) > 0:
        # Warn the user a file has been modified.
        print(F'''Detected changes, rewriting (unknown).''')
        with open(snake_case__ , "w" , encoding="utf-8" , newline="\n") as f:
            f.writelines(snake_case__)
    return diffs
# Run the copy-consistency check over every .py file in the package; raise if
# any inconsistency is found and overwrite is False.
# NOTE(review): obfuscation-damaged — locals are bound to `lowerCAmelCase_`
# while the body reads `all_files`, `diffs`, `new_diffs`, `overwrite`, `diff`;
# the f-string contains a "(unknown)" placeholder where the file name belonged.
# Annotating only; confirm against upstream before repairing.
def UpperCamelCase ( snake_case__ = False):
    lowerCAmelCase_ : Tuple = glob.glob(os.path.join(snake_case__ , "**/*.py") , recursive=snake_case__)
    lowerCAmelCase_ : int = []
    for filename in all_files:
        lowerCAmelCase_ : Union[str, Any] = is_copy_consistent(snake_case__ , snake_case__)
        diffs += [F'''- (unknown): copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
    if not overwrite and len(snake_case__) > 0:
        lowerCAmelCase_ : Optional[Any] = "\n".join(snake_case__)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
# CLI entry point for the copy-consistency checker.
# NOTE(review): parser/args are bound to `_lowercase`, yet the code reads
# `parser`/`args` and calls `check_copies`, none of which are bound in this
# module as written — confirm the intended names.
if __name__ == "__main__":
    _lowercase = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    _lowercase = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 659 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__lowerCamelCase = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 96 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# Fix: both assignments below were bound to the same name (`_lowercase`),
# so the archive map silently clobbered the module logger.
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config file.
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/swinv2-tiny-patch4-window8-256''': (
        '''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
    ),
}
class __snake_case ( PretrainedConfig ):
    r"""
    Configuration for Swin Transformer V2 models.

    Fixes applied: the base class was an undefined name (`snake_case__`) and is
    restored to the `PretrainedConfig` imported at the top of this file; the two
    class attributes shared one obfuscated name (the second shadowed the first)
    and are restored to the `model_type` / `attribute_map` names that the
    `PretrainedConfig` machinery requires; the ``__init__`` parameters all
    shared a single name (a SyntaxError in Python) and are restored from the
    attribute names assigned in the body.
    """

    model_type = 'swinv2'

    # Maps generic config attribute names onto Swinv2-specific ones.
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=2_24,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        """Store the Swinv2 hyper-parameters; unknown kwargs go to the base class."""
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # One transformer stage per entry in `depths`.
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        # NOTE(review): the assignment target for this tuple was lost in the
        # garbling; upstream Swinv2Config stores it as `pretrained_window_sizes`
        # -- confirm against the original file.
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 659 | 0 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


# Fix: logger and archive map were both bound to `__a`, so the map
# clobbered the logger.
logger = logging.get_logger(__name__)

# Canonical XLM checkpoint name -> hosted config file.
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json',
    'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json',
    'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json',
    'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json',
    'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json',
    'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json',
    'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json',
    'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json',
    'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json',
    'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json',
}
class lowercase__( PretrainedConfig ):
    """
    Configuration class for XLM models.

    Fixes applied: the base class was an undefined name (`UpperCAmelCase`) and
    is restored to the `PretrainedConfig` imported above; the two class
    attributes shared one obfuscated name (`a`, second shadowing the first)
    and are restored to `model_type` / `attribute_map`; the ``__init__``
    parameters all shared a single name (a SyntaxError) and are restored from
    the attribute names assigned in the body plus the default values preserved
    in the garbled signature.
    """

    model_type = 'xlm'

    # Maps generic config attribute names onto XLM-specific ones.
    attribute_map = {
        'hidden_size': 'emb_dim',
        'num_attention_heads': 'n_heads',
        'num_hidden_layers': 'n_layers',
        'n_words': 'vocab_size',  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=3_0_1_4_5,
        emb_dim=2_0_4_8,
        n_layers=1_2,
        n_heads=1_6,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=5_1_2,
        embed_init_std=2_0_4_8**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Store the XLM hyper-parameters; token ids and unknown kwargs go to the base class."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        # Legacy alias still accepted for backward compatibility.
        if "n_words" in kwargs:
            self.n_words = kwargs['''n_words''']

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class lowercase__( OnnxConfig ):
    """ONNX export configuration for XLM models."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Describe the dynamic axes of the model inputs for ONNX export.

        Fixes applied: the base class was an undefined name and is restored to
        the `OnnxConfig` imported above; the property was obfuscated to
        `_lowercase`, so it never overrode the abstract `OnnxConfig.inputs`
        that the export machinery reads -- restored to `inputs`.
        """
        if self.task == "multiple-choice":
            # Multiple choice adds a per-question choice axis.
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
| 97 |
from typing import List, Optional, Union

import numpy as np

from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging


# Fix: bound to `_lowercase` before, while the feature extractor below calls
# `logger.warning(...)` -- the name must be `logger`.
logger = logging.get_logger(__name__)
class __snake_case ( SequenceFeatureExtractor ):
    r"""
    M-CTC-T feature extractor: converts raw speech into Mel-frequency spectral
    coefficient (MFSC) features, pads batches and optionally applies
    per-utterance mean/variance normalization.

    Fixes applied: the base class was an undefined name and is restored to the
    imported `SequenceFeatureExtractor`; method names were all collapsed to one
    obfuscated identifier (so only the last survived) and are restored from the
    call sites inside ``__call__``/``normalize``; every ``__init__``/``__call__``
    parameter shared a single name (a SyntaxError) and is restored from the
    attribute names used in the bodies.

    Args:
        feature_size (`int`, defaults to 80): number of Mel filters / feature dims.
        sampling_rate (`int`, defaults to 16000): expected input rate in Hz.
        padding_value (`float`, defaults to 0.0): value used to pad short sequences.
        hop_length (`int`, defaults to 10): frame stride in milliseconds.
        win_length (`int`, defaults to 25): window length in milliseconds.
        win_function (`str`, defaults to `"hamming_window"`): window function name.
        frame_signal_scale (`float`, defaults to 32768.0): waveform scale before framing.
        preemphasis_coeff (`float`, defaults to 0.97): pre-emphasis coefficient.
        mel_floor (`float`, defaults to 1.0): floor applied to the Mel filterbank output.
        normalize_means (`bool`, defaults to `True`): subtract per-feature means.
        normalize_vars (`bool`, defaults to `True`): divide by per-feature stds.
        return_attention_mask (`bool`, defaults to `False`): default mask behavior.
    """

    model_input_names = ['input_features', 'attention_mask']

    def __init__(
        self,
        feature_size=80,
        sampling_rate=1_60_00,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32_768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        # Window/stride in samples (the ms values are per 1000 ms).
        self.sample_size = win_length * sampling_rate // 10_00
        self.sample_stride = hop_length * sampling_rate // 10_00
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extract MFSC features for a single waveform; returns (frames, feature_size)."""
        if self.win_function == "hamming_window":
            # Hamming windows are conventionally non-periodic for analysis.
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        """Normalize one feature array over its valid (unpadded) prefix."""
        # Statistics are computed over the real frames only, then applied to
        # the whole array (padding included) before re-padding below.
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            # Restore the padding value on the padded tail.
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        """Normalize a batch; lengths come from the mask when available."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one utterance or a batch, pad, normalize and optionally tensorize.

        Raises:
            ValueError: if `sampling_rate` disagrees with the extractor's, or
                for multi-channel numpy input.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )

        # A 2-D numpy array is a batch of mono waveforms; >2-D is unsupported.
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            # The mask is always needed internally for normalization below.
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            # Only trust the mask when padding was actually applied.
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask)

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 659 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __lowerCAmelCase ( ProcessorMixin ):
    r"""
    Processor bundling a Chinese-CLIP image processor and a BERT tokenizer into
    one callable object.

    Fixes applied: the base class was an undefined name and is restored to the
    imported `ProcessorMixin`; class attributes are restored to the
    `attributes` / `image_processor_class` / `tokenizer_class` names that
    `ProcessorMixin` requires; the four wrapper methods all shared one
    obfuscated name (only the last survived) and are restored to the standard
    processor API names; in ``__call__`` the `pixel_values` assignment target
    had been lost, so the joint text+image branch returned text encodings with
    no image features.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Accepts the deprecated `feature_extractor` kwarg as a fallback for `image_processor`."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor = kwargs.pop('''feature_extractor''' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one must be given.

        Returns a `BatchEncoding` with token fields and/or `pixel_values`.
        """
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Merge the image features into the token encoding.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, order-preserving."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', FutureWarning, )
        return self.image_processor_class
| 98 |
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
# Fix: the search functions below read a module-level `precision`, but the
# constant had been bound to a throwaway name.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search of `array[left:right]` (right exclusive).

    Fix: restored the function/parameter names -- the four parameters all
    shared one obfuscated name (a SyntaxError), and the ternary-search helpers
    in this file call this function as `lin_search`.

    Returns the index of `target`, or -1 when it is not found.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted `array`.

    Falls back to `lin_search` once the window is narrower than the
    module-level `precision`. Returns the index of `target`, or -1.

    Fix: restored the function name (the `__main__` block calls
    `ite_ternary_search`) and the local names, which had all been collapsed
    to one obfuscated identifier.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        # Split the window at the one-third and two-thirds points.
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over sorted `array[left:right+1]`.

    Falls back to `lin_search` once the window is narrower than the
    module-level `precision`. Returns the index of `target`, or -1.

    Fix: restored the function/parameter names -- all four parameters shared
    one obfuscated name (a SyntaxError); the recursive call sites and the
    `__main__` block ground the `(left, right, array, target)` order.
    """
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            # Target can only be in the left third.
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            # Target can only be in the right third.
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            # Target is between the two pivots.
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase = input('''Enter numbers separated by comma:\n''').strip()
_lowercase = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
_lowercase = int(input('''Enter the number to be found in the list:\n''').strip())
_lowercase = ite_ternary_search(collection, target)
_lowercase = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f"Iterative search: {target} found at positions: {resulta}")
print(f"Recursive search: {target} found at positions: {resulta}")
else:
print('''Not found''')
| 659 | 0 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __UpperCAmelCase ( unittest.TestCase ):
    """Round-trip and component-parity tests for `ClapProcessor`.

    Fixes applied: every method had been obfuscated to the same name
    (`snake_case_`), so later defs shadowed earlier ones -- the `setUp`/
    `tearDown` hooks never ran and unittest could discover no test. Names are
    restored from the internal call sites (`self.get_tokenizer()`,
    `self.get_feature_extractor()`, `self.checkpoint`, `self.tmpdirname`) and
    unittest conventions.
    """

    def setUp(self):
        self.checkpoint = """laion/clap-htsat-unfused"""
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer loaded from the reference checkpoint."""
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        """Feature extractor loaded from the reference checkpoint."""
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        """save_pretrained/from_pretrained must preserve both sub-components."""
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        # Reloading promotes the tokenizer to its fast variant.
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        """Kwargs passed to from_pretrained must override the saved settings."""
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""", eos_token="""(EOS)""")
        # NOTE(review): the original literal for `do_normalize` was lost in the
        # garbling; False matches the upstream CLAP processor test.
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="""(BOS)""", eos_token="""(EOS)""", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        """processor(audios=...) must match calling the feature extractor directly."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="""np""")
        input_processor = processor(audios=raw_speech, return_tensors="""np""")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        """processor(text=...) must match calling the tokenizer directly."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = """This is a test string"""

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        """batch_decode must be forwarded to the tokenizer."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        """The processor exposes the feature extractor's input names after the text ones."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg="""`processor` and `feature_extractor` model input names do not match""", )
| 99 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
# Fix: all four module-level objects were bound to the same name
# (`_lowercase`), so each assignment clobbered the previous one and the
# tokenizer class below referenced undefined constants. Names are restored
# from the class-attribute references (VOCAB_FILES_NAMES, etc.).
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
    },
    '''tokenizer_config_file''': {
        '''facebook/blenderbot_small-90M''': (
            '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''facebook/blenderbot_small-90M''': 512,
}
class __snake_case ( PreTrainedTokenizerFast ):
    """Fast (byte-level BPE) tokenizer for BlenderbotSmall.

    Fixes applied: the base class was an undefined name and is restored to the
    imported `PreTrainedTokenizerFast`; the four class attributes shared one
    obfuscated name (only the last survived) and are restored to the names the
    tokenizer machinery requires; the two methods shared one obfuscated name
    and their parameters had been collapsed to a single identifier -- restored
    to the standard tokenizer API.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Wrap one or two sequences with BOS/EOS special tokens."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return an all-zero token-type mask (BlenderbotSmall does not use token types)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 659 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = StableUnCLIPImgaImgPipeline
lowerCamelCase__ : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCamelCase__ : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase__ : List[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCamelCase__ : int = frozenset([] )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 32
SCREAMING_SNAKE_CASE__ = embedder_hidden_size
# image encoding components
SCREAMING_SNAKE_CASE__ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=A_ , projection_dim=A_ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = StableUnCLIPImageNormalizer(embedding_dim=A_ )
SCREAMING_SNAKE_CASE__ = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=A_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=A_ , layers_per_block=1 , upcast_attention=A_ , use_linear_projection=A_ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.00085 , beta_end=0.012 , prediction_type='''v_prediction''' , set_alpha_to_one=A_ , steps_offset=1 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoencoderKL()
SCREAMING_SNAKE_CASE__ = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def lowercase_ ( self , A_ , A_=0 , A_=True ):
'''simple docstring'''
if str(A_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(A_ )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=A_ ).manual_seed(A_ )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
if pil_image:
SCREAMING_SNAKE_CASE__ = input_image * 0.5 + 0.5
SCREAMING_SNAKE_CASE__ = input_image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
SCREAMING_SNAKE_CASE__ = DiffusionPipeline.numpy_to_pil(A_ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = StableUnCLIPImgaImgPipeline(**A_ )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(A_ )
inputs.update({'''image_embeds''': None} )
SCREAMING_SNAKE_CASE__ = sd_pipe(**A_ ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def test_attention_slicing_forward_pass(self):
    """Run the common attention-slicing check; exact-difference compare only on deterministic devices."""
    # NOTE(review): the local name was collapsed; it is the keyword argument below.
    test_max_difference = torch_device in ["cpu", "mps"]
    self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)
def test_inference_batch_single_identical(self):
    """Run the common batched-vs-single check; exact-difference compare only on deterministic devices."""
    # NOTE(review): the local name was collapsed; it is the keyword argument below.
    test_max_difference = torch_device in ["cpu", "mps"]
    self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@unittest.skipIf(
    torch_device != "cuda" or not is_xformers_available(),
    reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
    """Run the common xFormers forward check (CUDA + xformers only)."""
    # NOTE(review): the argument value was lost in obfuscation; diffusers' common
    # tests pass test_max_difference=False here — confirm against upstream.
    self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    """Slow GPU integration tests for the StableUnCLIP image-to-image pipeline.

    NOTE(review): local names and method names were collapsed by obfuscation and
    have been restored; `torch.floataa` (nonexistent attribute) restored to
    `torch.float16`, which the fp16 reference files confirm.
    """

    def tearDown(self):
        # Must be named tearDown so unittest runs it after each test
        # (the body calls super().tearDown()).
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip_l_img2img(self):
        """Output of the 2-1-l img2img checkpoint matches the stored reference image."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy")
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(init_image, "anime turle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_h_img2img(self):
        """Output of the 2-1-h img2img checkpoint matches the stored reference image."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy")
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(init_image, "anime turle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        """With attention slicing + sequential CPU offload, peak GPU memory stays under 7 GB."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPImgaImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            init_image, "anime turtle", num_inference_steps=2, output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 100 |
from collections.abc import Generator
from math import sin
def UpperCamelCase ( snake_case__):
    """Reorder a 32-char bit-string (four 8-char words) into little-endian word order.

    Raises:
        ValueError: if the input is not exactly 32 characters long.
    """
    # NOTE(review): the loop referenced undefined names `string_aa` and
    # `little_endian`; restored so the function actually uses its parameter.
    if len(snake_case__) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:  # emit the 8-char words last-to-first
        little_endian += snake_case__[8 * i : 8 * i + 8]
    return little_endian
def UpperCamelCase ( snake_case__):
    """Return the low 32 bits of a non-negative integer as little-endian hex bytes.

    Raises:
        ValueError: if the input is negative.
    """
    # NOTE(review): the body referenced undefined names `i`, `hex_rep` and
    # `little_endian_hex`; restored so the guard checks the actual parameter.
    if snake_case__ < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(snake_case__, "08x")[-8:]  # low 32 bits, zero-padded
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:  # emit the hex byte-pairs last-to-first
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def UpperCamelCase ( snake_case__):
    """MD5 preprocessing: convert a bytes message to its padded bit-string.

    The bit-string is the message's bits, a single ``1`` bit, zero padding up
    to 448 (mod 512), then the original bit length appended as two
    little-endian 32-bit words (RFC 1321, step 1-2).
    """
    # NOTE(review): the loop referenced undefined `message` and formatted the
    # whole argument instead of each byte; names restored from the references.
    bit_string = b""
    for char in snake_case__:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def UpperCamelCase ( snake_case__):
    """Yield each 512-char chunk of the bit-string as a list of sixteen 32-bit ints.

    Raises:
        ValueError: if the input length is not a multiple of 512.
    """
    # NOTE(review): the body referenced undefined `block` and `block_words`;
    # restored so the generator actually slices its input.
    if len(snake_case__) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(snake_case__), 512):
        block = snake_case__[pos : pos + 512]
        # Sixteen little-endian 32-bit words per block.
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def UpperCamelCase ( snake_case__):
    """Return the bitwise complement of a non-negative integer within 32 bits.

    Raises:
        ValueError: if the input is negative.
    """
    # NOTE(review): the body referenced undefined `i`, `i_str` and `new_str`;
    # restored so the guard and the flip loop use the actual parameter.
    if snake_case__ < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(snake_case__, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def UpperCamelCase ( a , b):
    """Return (a + b) modulo 2**32 (32-bit wrap-around addition).

    NOTE(review): the original signature declared both parameters with the
    same name (a SyntaxError); restored to the two names the body uses.
    """
    return (a + b) % 2**32
def UpperCamelCase ( i , shift):
    """Rotate the 32-bit value `i` left by `shift` bits.

    Raises:
        ValueError: if `i` or `shift` is negative.

    NOTE(review): the original signature declared both parameters with the
    same name (a SyntaxError); restored to the names the body uses.
    """
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    # The shifted halves never overlap, so ^ here is equivalent to |.
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def UpperCamelCase ( snake_case__):
    """Compute the MD5 digest of a bytes message, returned as lowercase hex bytes.

    Implements RFC 1321: preprocess the message into a padded bit-string,
    then run the 64-round compression over each 512-bit block.

    NOTE(review): every local variable had been collapsed to one identifier;
    state names (aa..da, a..d, f, g, ...) restored from the remaining
    references. The helper names (preprocess, get_block_words, not_aa,
    sum_aa, left_rotate_aa, reformat_hex) are kept exactly as the original
    body called them.
    """
    bit_string = preprocess(snake_case__)
    # K[i] = floor(2**32 * |sin(i + 1)|), the per-round additive constants.
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    aa = 0x67_45_23_01
    ba = 0xEF_CD_AB_89
    ca = 0x98_BA_DC_FE
    da = 0x10_32_54_76
    # Per-round left-rotation amounts, four per round group.
    shift_amounts = (
        [7, 12, 17, 22] * 4
        + [5, 9, 14, 20] * 4
        + [4, 11, 16, 23] * 4
        + [6, 10, 15, 21] * 4
    )
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))
        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)
    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 659 | 0 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
    """Builds a tiny ViT configuration and matching dummy inputs for the tests below.

    NOTE(review): the original __init__ declared every parameter with the same
    name (a SyntaxError) and collapsed every `self.*` assignment; names were
    restored from the `self.<attr>` references in the methods, and method names
    from their call sites (self.get_config(), model_tester.prepare_config_and_inputs(), ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one dummy batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Return a small ViTConfig built from this tester's hyper-parameters."""
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,  # NOTE(review): value lost in obfuscation; False assumed — confirm upstream
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __lowercase (ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViT
    does not use input_ids, inputs_embeds, attention_mask and seq_length.

    NOTE(review): the base classes were the undefined name
    `__SCREAMING_SNAKE_CASE`; restored to the mixins imported at the top of the
    file. Class-attribute and method names were collapsed and have been
    restored (test_* names are required for unittest discovery).
    """

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    # NOTE(review): the four flags below kept their values (True, False, False,
    # False); the attribute names are assumed from the common tester mixin.
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def a__ ( ):
    """Load the fixture COCO sample image used by the integration tests."""
    # NOTE(review): the local name was collapsed; restored from the return.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase):
    """Slow integration tests that run real ViT checkpoints on a sample image.

    NOTE(review): local and method names were collapsed by obfuscation and have
    been restored (`default_image_processor` is pinned by the
    `self.default_image_processor` references; `torch.floataa` — a nonexistent
    attribute — restored to `torch.float16`).
    """

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # Run the model at a higher resolution (480) than it was trained on;
        # interpolate_pos_encoding=True resizes the position embeddings.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        """A small smoke test to make sure inference works in half precision."""
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
| 101 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
_lowercase = True
from torch.cuda.amp import autocast
_lowercase = logging.getLogger(__name__)
@dataclass
class __snake_case :
    """Arguments pertaining to the model/config used for pre-training.

    NOTE(review): all fields had the same name and no type annotations
    (annotation-less class attributes are NOT dataclass fields, so
    HfArgumentParser would see no arguments at all). Names were restored from
    the `model_args.<name>` accesses in main(); the True/False defaults for the
    two boolean fields were lost and are assumed — confirm against upstream.
    """

    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
    verbose_logging: Optional[bool] = field(
        default=False, metadata={'help': 'Whether to log verbose messages or not.'} , )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'} )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'} )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995 , metadata={'help': 'Decay of gumbel temperature during training.'} )
def UpperCamelCase ( model_args , training_args):
    """Configure root logging format and set this module's logger level.

    DEBUG when verbose logging is requested, INFO on the main process,
    WARNING otherwise.

    NOTE(review): the original signature declared both parameters with the
    same name (a SyntaxError); names restored from the body's attribute
    accesses.
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class __snake_case :
    """Arguments pertaining to the data used for pre-training.

    NOTE(review): all fields had the same name and no type annotations (so
    they were not dataclass fields at all). Names restored from the
    `data_args.<name>` accesses in main(); defaults that were lost
    (None/False) are assumed — confirm against upstream.
    """

    dataset_name: str = field(
        default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    train_split_name: Optional[str] = field(
        default='train' , metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        } , )
    validation_split_name: Optional[str] = field(
        default='validation' , metadata={
            'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
            )
        } , )
    speech_file_column: Optional[str] = field(
        default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    validation_split_percentage: Optional[int] = field(
        default=1 , metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        } , )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'} , )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} )
@dataclass
class __snake_case :
    """Data collator that dynamically pads inputs and samples the masked time
    indices needed for Wav2Vec2 pre-training.

    NOTE(review): the five field declarations had been obfuscated (the first
    two to the literal ``42``, i.e. their type annotations were destroyed);
    names restored from the ``self.<attr>`` accesses in ``__call__``. The
    left-hand side of the "attend up to output length" assignment and the
    destination of the sampled mask indices were also lost; both are restored
    to the standard wav2vec2 pre-training collator form — confirm upstream.
    """

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__( self , features: List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
        """Pad a list of feature dicts to a batch and add `mask_time_indices`."""
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
                torch.long )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device )
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class __snake_case ( Trainer ):
    """Trainer subclass for Wav2Vec2 pre-training that decays the gumbel
    softmax temperature after every update step.

    NOTE(review): the base class name was the undefined `snake_case__`;
    restored to `Trainer` (the override below matches its API). The original
    __init__ declared keyword parameters with colliding names (a SyntaxError);
    names restored from the `self.<attr>` references, and the step override is
    named `training_step` so the Trainer actually calls it.
    """

    def __init__( self , *args , max_gumbel_temp=1 , min_gumbel_temp=0 , gumbel_temp_decay=1.0 , **kwargs ):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step( self , model: nn.Module , inputs: Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
        """Run one training step (fwd, loss scaling, backward) and return the detached loss."""
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            # Under DataParallel the per-device losses must be reduced here.
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss ).backward()
        elif self.use_apex:
            with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss )
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
        return loss.detach()
def UpperCamelCase ( ):
    """Entry point: parse args, build the datasets, model, collator and trainer, then train.

    NOTE(review): every local name had been collapsed to one identifier; names
    restored from the remaining references. The collator/trainer class names
    (DataCollatorForWavaVecaPretraining, WavaVecaPreTrainer) are kept exactly
    as the original body referenced them.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''',
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''',
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain"
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f'''{data_args.train_split_name}''', cache_dir=model_args.cache_dir,
        )
    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True)

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], batch["sampling_rate"] = librosa.load(
            batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names)
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'")
    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
| 659 | 0 |
"""simple docstring"""
from __future__ import annotations
__magic_name__ : Any = """Muhammad Umer Farooq"""
__magic_name__ : int = """MIT"""
__magic_name__ : List[Any] = """1.0.0"""
__magic_name__ : Optional[Any] = """Muhammad Umer Farooq"""
__magic_name__ : Tuple = """contact@muhammadumerfarooq.me"""
__magic_name__ : Tuple = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class lowercase__ ( HTMLParser ):
    """HTML parser that collects, absolutized against a base domain, every
    hyperlink found in anchor tags.

    NOTE(review): the base class was the undefined name
    `__SCREAMING_SNAKE_CASE`; restored to HTMLParser (imported above). The tag
    handler had two parameters with the same name (a SyntaxError) and must be
    named `handle_starttag` to be invoked by HTMLParser.feed().
    """

    def __init__( self , domain ):
        super().__init__()
        # URLs collected so far, in document order.
        self.urls: list[str] = []
        # Base used to resolve relative hrefs.
        self.domain = domain

    def handle_starttag( self , tag , attrs ):
        """Record the href of every non-empty, non-fragment anchor tag."""
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name(SCREAMING_SNAKE_CASE):
    """Return the second-level domain of a URL, e.g. ``"github.com"`` for
    ``"https://sub.github.com/x"``.

    Name restored from the call site below; the obfuscated file gave every
    function the same name, so this helper was unreachable.
    """
    return ".".join(get_sub_domain_name(SCREAMING_SNAKE_CASE).split(".")[-2:])
def get_sub_domain_name(SCREAMING_SNAKE_CASE):
    """Return the network-location (host[:port]) component of a URL.

    Name restored from the call site in ``get_domain_name`` above.
    """
    return parse.urlparse(SCREAMING_SNAKE_CASE).netloc
def emails_from_url(SCREAMING_SNAKE_CASE = "https://github.com"):
    """Scrape the given URL and return a sorted, de-duplicated list of e-mail
    addresses (on the same second-level domain) found on the pages it links to.

    Name restored from the ``__main__`` call site; locals restored from their
    use-sites (the obfuscated file assigned them all to one name).
    """
    domain = get_domain_name(SCREAMING_SNAKE_CASE)

    # Initialize the parser
    parser = Parser(SCREAMING_SNAKE_CASE)

    try:
        # Open URL
        r = requests.get(SCREAMING_SNAKE_CASE)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # Bug fix: the original fetched the base URL again instead of the
            # discovered link.
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    # Scrape github.com and report every e-mail address found on linked pages.
    emails = emails_from_url("https://github.com")
    print(f"{len(emails)} emails found:")
    print("\n".join(sorted(emails)))
# ---- (non-code chunk-separator artifact; commented out so the file parses) ----
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc, x_start, x_end, steps=1_00):
    """Approximate the area between *fnc*, the x axis, and the vertical lines
    ``x = x_start`` and ``x = x_end`` using *steps* trapezoids.

    The obfuscated signature repeated one parameter name (a syntax error);
    names are restored from the body's references and the ``__main__`` caller.
    """
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa2 = (x_end - x_start) / steps + xa
        fxa2 = fnc(xa2)
        area += abs(fxa2 + fxa) * (xa2 - xa) / 2
        # Increment step
        xa = xa2
        fxa = fxa2
    return area
if __name__ == "__main__":

    def f(x):
        # Curve under test: f(x) = x^3 + x^2.  Named `f` (as the f-string
        # below references it) so it no longer shadows the integrator.
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
# ---- (non-code chunk-separator artifact; commented out so the file parses) ----
"""simple docstring"""
import datasets
from .evaluate import evaluate
# Metric metadata strings.  NOTE(review): the obfuscated file assigned all
# three to the same name; restored to the private names the decorator and
# `_info` below actually reference (_CITATION, _DESCRIPTION, _KWARGS_DESCRIPTION).
_CITATION = '''\
@article{hendrycks2021cuad,
    title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
    author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
    journal={arXiv preprint arXiv:2103.06268},
    year={2021}
}
'''

_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''

_KWARGS_DESCRIPTION = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
    predictions: List of question-answers dictionaries with the following key-values:
        - \'id\': id of the question-answer pair as given in the references (see below)
        - \'prediction_text\': list of possible texts for the answer, as a list of strings
        depending on a threshold on the confidence probability of each prediction.
    references: List of question-answers dictionaries with the following key-values:
        - \'id\': id of the question-answer pair (see above),
        - \'answers\': a Dict in the CUAD dataset format
            {
                \'text\': list of possible texts for the answer, as a list of strings
                \'answer_start\': list of start positions for the answer, as a list of ints
            }
            Note that answer_start values are not taken into account to compute the metric.
Returns:
    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)
    \'f1\': The F-score of predicted tokens versus the gold answer
    \'aupr\': Area Under the Precision-Recall curve
    \'prec_at_80_recall\': Precision at 80% recall
    \'prec_at_90_recall\': Precision at 90% recall
Examples:
    >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
    >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
    >>> cuad_metric = datasets.load_metric("cuad")
    >>> results = cuad_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase(datasets.Metric):
    """CUAD metric: wraps the official CUAD v1 scoring script (see ``evaluate``).

    NOTE(review): the obfuscated file defined both methods under one name
    (shadowing the first) and repeated a parameter name (a syntax error);
    restored to the ``_info``/``_compute`` hooks ``datasets.Metric`` dispatches
    to, with parameter names taken from the body's references.
    """

    def _info(self):
        """Declare the metric's input features and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        """Score *predictions* against *references* with the CUAD evaluate script."""
        # Map each question id to its list of candidate answer texts.
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        # Re-shape the references into the nested CUAD dataset format the
        # official scorer expects.
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
# ---- (non-code chunk-separator artifact; commented out so the file parses) ----
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class __snake_case(unittest.TestCase):
    """Fast (tiny-model, CPU-friendly) tests for ``StableDiffusionLDMaDPipeline``.

    Locals and helper-method names are restored from their use-sites; test
    method names follow the diffusers convention (originals not visible —
    confirm against upstream).
    """

    # NOTE(review): the obfuscated file assigned all four class attributes to
    # one name; restored per the diffusers pipeline-test convention — confirm.
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build the smallest possible set of pipeline components (seeded)."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00_085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            # presumably False/False — the obfuscation replaced the literals;
            # TODO confirm against upstream diffusers test.
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-0_5,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=10_00,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline call kwargs for *device*."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ldm3d_default_case(self):
        """RGB and depth outputs match the recorded reference slices."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262]
        )
        expected_slice_depth = np.array([103.46_727, 85.812_004, 87.849_236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        """Passing pre-computed prompt embeddings matches passing the prompt."""
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = ldmad_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldmad_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldmad_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        """A negative prompt with the PNDM scheduler matches the reference slices."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315, 0.47_926_217]
        )
        expected_slice_depth = np.array([107.84_738, 84.62_802, 89.962_135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class __snake_case(unittest.TestCase):
    """Slow GPU integration tests against the pretrained ``Intel/ldm3d`` checkpoint."""

    def tearDown(self):
        """Free GPU memory between tests (name restored: it calls super().tearDown())."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Deterministic pipeline inputs with pre-baked latents.

        The obfuscated signature repeated a parameter name and referenced the
        non-existent ``torch.floataa``; restored to ``torch.float32``.
        """
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        """Output slices of the pretrained pipeline match the recorded references."""
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        # NOTE(review): this slice is taken from `rgb`, not `depth`, in the
        # source; kept as-is (it matches the 9-element expected slice below) —
        # confirm against upstream.
        depth_slice = rgb[0, -3:, -1].flatten()
        assert rgb.shape == (1, 5_12, 5_12, 3)
        assert depth.shape == (1, 5_12, 5_12)
        expected_slice_rgb = np.array(
            [0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706]
        )
        expected_slice_depth = np.array(
            [0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class __snake_case(unittest.TestCase):
    """Nightly GPU tests comparing global RGB/depth statistics of the LDM3D checkpoints."""

    def tearDown(self):
        """Free GPU memory between tests (name restored: it calls super().tearDown())."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Deterministic pipeline inputs with pre-baked latents (50 steps).

        The obfuscated signature repeated a parameter name and referenced the
        non-existent ``torch.floataa``; restored to ``torch.float32``.
        """
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        """Mean/std of the Intel/ldm3d output match the recorded statistics."""
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495_586
        expected_rgb_std = 0.33_795_515
        expected_depth_mean = 112.48_518
        expected_depth_std = 98.489_746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_4c(self):
        """Shapes and mean/std of the Intel/ldm3d-4c output match the recorded statistics."""
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4_194_127
        expected_rgb_std = 0.35_375_586
        expected_depth_mean = 0.5_638_502
        expected_depth_std = 0.34_686_103
        assert rgb.shape == (1, 5_12, 5_12, 3)
        assert depth.shape == (1, 5_12, 5_12, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
# ---- (non-code chunk-separator artifact; commented out so the file parses) ----
"""simple docstring"""
def _lowerCamelCase(UpperCAmelCase_: int) -> int:
    """Return the largest number obtainable by removing exactly one digit.

    The sign is discarded (``abs``).  The obfuscated body re-used the parameter
    name for the isinstance check, the string copies, the pop index, and the
    join argument; restored from the surrounding references.

    Raises:
        TypeError: if the input is not an ``int``.
    """
    if not isinstance(UpperCAmelCase_, int):
        raise TypeError("only integers accepted as input")
    else:
        num_string = str(abs(UpperCAmelCase_))
        # One copy of the digit list per digit position ...
        num_transpositions = [list(num_string) for char in range(len(num_string))]
        # ... then drop a different digit from each copy.
        for index in range(len(num_string)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
    # Run doctest over this module.
    __import__("""doctest""").testmod()
# ---- (non-code chunk-separator artifact; commented out so the file parses) ----
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
# Original-checkpoint key fragment -> HF SamModel key fragment.
# Name restored: `replace_keys` below iterates KEYS_TO_MODIFY_MAPPING.
KEYS_TO_MODIFY_MAPPING = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "image_encoder": "vision_encoder",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}
def replace_keys(state_dict):
    """Rename original SAM checkpoint keys to the HF ``SamModel`` layout.

    Name restored from the call site in ``convert_sam_checkpoint``; the
    parameter name is restored from the body's ``state_dict`` references.
    Drops the normalisation buffers and remaps the hypernetwork MLP layers.
    """
    model_state_dict = {}
    # Normalisation constants live on the processor, not the model.
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    # NOTE(review): the obfuscated LHS was lost; per the HF conversion script
    # the positional embedding is duplicated under this key — confirm upstream.
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    """Download a SAM checkpoint, load it into an HF ``SamModel``, and sanity-check
    its IoU scores on a reference image.

    Name restored from the ``__main__`` call site; locals restored from their
    use-sites.  Requires a CUDA device.  ``pytorch_dump_folder`` and
    ``push_to_hub`` are accepted for CLI compatibility but unused in this
    visible portion of the script.
    """
    checkpoint_path = hf_hub_download(model_hub_id, f'''checkpoints/{model_name}.pth''')
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=10_24,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=12_80,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[4_00, 6_50]]]
    input_labels = [[1]]

    # No prompt: score the whole image.
    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_890_251_159_668
    # Single point prompt.
    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9_712_603_092_193_604
    # Box prompt.
    input_boxes = ((75, 2_75, 17_25, 8_50),)
    inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.8_686_015_605_926_514
    # Test with 2 points and 1 image.
    input_points = [[[4_00, 6_50], [8_00, 6_50]]]
    input_labels = [[1, 1]]
    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    # NOTE(review): reusing `choices` here looks wrong (the default hub id is
    # not one of the model names) but matches the source — confirm upstream.
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    args = parser.parse_args()
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
# ---- (non-code chunk-separator artifact; commented out so the file parses) ----
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    """Config holder / expectation helper for the DETA image processor tests.

    Class and method names restored from their call sites in the test class
    below; the obfuscated ``__init__`` repeated parameter names (a syntax
    error) — they are restored from the body's references.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # mutable defaults kept for interface compatibility; read-only here
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        self.size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Kwargs for constructing the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Return the (height, width) the processor should resize to.

        Mirrors shortest-edge resizing; for batched inputs returns the max
        height/width over the batch (the padding target).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Dict = DetaImageProcessor if is_vision_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DetaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'image_mean' ) )
self.assertTrue(hasattr(snake_case__ ,'image_std' ) )
self.assertTrue(hasattr(snake_case__ ,'do_normalize' ) )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'do_rescale' ) )
self.assertTrue(hasattr(snake_case__ ,'do_pad' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad ,snake_case__ )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.image_processor_tester.get_expected_values(snake_case__ ,batched=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor_tester.get_expected_values(snake_case__ ,batched=snake_case__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self.image_processor_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.image_processor_tester.get_expected_values(snake_case__ ,batched=snake_case__ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def snake_case ( self ):
# prepare image and target
SCREAMING_SNAKE_CASE_ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Any = {'image_id': 39769, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE_ : int = DetaImageProcessor()
SCREAMING_SNAKE_CASE_ : int = image_processing(images=snake_case__ ,annotations=snake_case__ ,return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,snake_case__ ,atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,snake_case__ ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : int = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,snake_case__ ,atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : int = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,snake_case__ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,snake_case__ ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,snake_case__ ) )
# verify orig_size
SCREAMING_SNAKE_CASE_ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,snake_case__ ) )
# verify size
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,snake_case__ ) )
@slow
def snake_case ( self ):
# prepare image, target and masks_path
SCREAMING_SNAKE_CASE_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = json.loads(f.read() )
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
SCREAMING_SNAKE_CASE_ : List[str] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE_ : Any = DetaImageProcessor(format='coco_panoptic' )
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(images=snake_case__ ,annotations=snake_case__ ,masks_path=snake_case__ ,return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,snake_case__ ,atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,snake_case__ ) )
# verify boxes
SCREAMING_SNAKE_CASE_ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,snake_case__ ,atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,snake_case__ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,snake_case__ ) )
# verify class_labels
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,snake_case__ ) )
# verify masks
SCREAMING_SNAKE_CASE_ : Any = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,snake_case__ )
# verify orig_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,snake_case__ ) )
# verify size
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,snake_case__ ) )
| 105 |
class __snake_case :
"""simple docstring"""
def __init__( self : Union[str, Any] ,lowerCAmelCase__ : str = "" ,lowerCAmelCase__ : bool = False ) -> None:
'''simple docstring'''
lowerCAmelCase_ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowerCAmelCase_ : Optional[int] = is_leaf
lowerCAmelCase_ : List[str] = prefix
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : str ) -> tuple[str, str, str]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = 0
for q, w in zip(self.prefix ,lowerCAmelCase__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : list[str] ) -> None:
'''simple docstring'''
for word in words:
self.insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : str ) -> None:
'''simple docstring'''
if self.prefix == word:
lowerCAmelCase_ : Optional[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowerCAmelCase_ : Optional[int] = RadixNode(prefix=lowerCAmelCase__ ,is_leaf=lowerCAmelCase__ )
else:
lowerCAmelCase_ : Optional[Any] = self.nodes[word[0]]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Any = incoming_node.match(
lowerCAmelCase__ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowerCAmelCase_ : Dict = remaining_prefix
lowerCAmelCase_ : str = self.nodes[matching_string[0]]
lowerCAmelCase_ : Dict = RadixNode(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = aux_node
if remaining_word == "":
lowerCAmelCase_ : Optional[Any] = True
else:
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : List[str] = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = incoming_node.match(
lowerCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : int = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = incoming_node.match(
lowerCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCAmelCase__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowerCAmelCase_ : int = list(self.nodes.values() )[0]
lowerCAmelCase_ : List[Any] = merging_node.is_leaf
self.prefix += merging_node.prefix
lowerCAmelCase_ : int = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowerCAmelCase_ : List[str] = False
# If there is 1 edge, we merge it with its child
else:
lowerCAmelCase_ : Union[str, Any] = list(incoming_node.nodes.values() )[0]
lowerCAmelCase_ : Optional[int] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowerCAmelCase_ : List[str] = merging_node.nodes
return True
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : int = 0 ) -> None:
'''simple docstring'''
if self.prefix != "":
print("-" * height ,self.prefix ," (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie() -> bool:
    """Exercise insert/find/delete on a small radix tree.

    The scrambled version defined all three driver functions under the same
    placeholder name (each redefinition clobbering the previous) while the
    bodies referenced ``test_trie``/``main`` and an undefined ``snake_case__``;
    the real names and locals are restored here.
    """
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests() -> None:
    """Entry point usable by a test runner."""
    assert test_trie()


def main() -> None:
    """Build a demo tree and print it."""
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 659 | 0 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
# Optional dependency: the `add-new-model` command needs cookiecutter.
# The flag must be named `_has_cookiecutter` — `run()` below tests it;
# the scrambled version assigned it to a placeholder name.
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def lowerCamelCase_(args: Namespace) -> "AddNewModelCommand":
    """Factory that builds an ``AddNewModelCommand`` from parsed CLI args.

    Bug fix: the parameter was named with a placeholder while the body read
    ``args`` (a NameError as written); the parameter is now really ``args``.
    """
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    """Deprecated ``transformers-cli add-new-model`` command.

    Runs the ``adding_a_new_model`` cookiecutter template and moves the
    generated files into the repository layout. The class is named
    ``AddNewModelCommand`` because the factory above instantiates it under
    that name; the scrambled version assigned intermediate results to a
    placeholder ``A`` while later lines read the real names (NameErrors).
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the ``add-new-model`` sub-parser and its options."""
        add_new_model_parser = parser.add_parser('add-new-model')
        add_new_model_parser.add_argument('--testing', action='store_true', help='If in testing mode.')
        add_new_model_parser.add_argument('--testing_file', type=str, help='Configuration file on which to run.')
        add_new_model_parser.add_argument(
            '--path', type=str, help='Path to cookiecutter. Should only be used for testing purposes.'
        )
        add_new_model_parser.set_defaults(func=lowerCamelCase_)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        """Execute the cookiecutter template and distribute the output files."""
        warnings.warn(
            'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
            'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
            'checks, you should use `transformers-cli add-new-model-like` instead.' )
        if not _has_cookiecutter:
            raise ImportError(
                'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
                'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                'Several directories starting with `cookiecutter-template-` in current working directory. '
                'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
                'change your working directory.' )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / 'templates' / 'adding_a_new_model'

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, 'r') as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]

        # Retrieve configuration
        with open(directory + '/configuration.json', 'r') as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration['lowercase_modelname']
        generate_tensorflow_pytorch_and_flax = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(f'''{directory}/configuration.json''')

        output_pytorch = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        output_flax = 'Flax' in generate_tensorflow_pytorch_and_flax

        model_dir = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''', exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''', 'w'):
            pass

        shutil.move(
            f'''{directory}/__init__.py''', f'''{model_dir}/__init__.py''', )
        shutil.move(
            f'''{directory}/configuration_{lowercase_model_name}.py''', f'''{model_dir}/configuration_{lowercase_model_name}.py''', )

        def remove_copy_lines(path: str):
            # Strip "# Copied from transformers." markers from a generated file.
            with open(path, 'r') as f:
                lines = f.readlines()
            with open(path, 'w') as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''')
            shutil.move(
                f'''{directory}/modeling_{lowercase_model_name}.py''', f'''{model_dir}/modeling_{lowercase_model_name}.py''', )
            shutil.move(
                f'''{directory}/test_modeling_{lowercase_model_name}.py''', f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''', )
        else:
            os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''')
            os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''')

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''')
            shutil.move(
                f'''{directory}/modeling_tf_{lowercase_model_name}.py''', f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''', )
            shutil.move(
                f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''', f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''', )
        else:
            os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''')
            os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''')

        if output_flax:
            if not self._testing:
                remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''')
            shutil.move(
                f'''{directory}/modeling_flax_{lowercase_model_name}.py''', f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''', )
            shutil.move(
                f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''', f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''', )
        else:
            os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''')
            os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''')

        shutil.move(
            f'''{directory}/{lowercase_model_name}.md''', f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''', )
        shutil.move(
            f'''{directory}/tokenization_{lowercase_model_name}.py''', f'''{model_dir}/tokenization_{lowercase_model_name}.py''', )
        shutil.move(
            f'''{directory}/tokenization_fast_{lowercase_model_name}.py''', f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''', )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Copy `lines_to_copy` into `original_file` right below the marker line.
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, 'w') as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f'''Line {line_to_copy_below} was not found in file.''')

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            # A snippet is skipped when it targets a framework the user did not ask for.
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            # Parse a to_replace_*.py data file and apply each snippet.
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''')
        os.rmdir(directory)
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """Helper that builds configs/inputs for TFBlipTextModel tests.

    Named ``BlipTextModelTester`` because the test class below instantiates it
    under that name; the scrambled version stored every attribute in a
    placeholder local instead of on ``self``.
    """

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Build random input ids plus an attention mask with a random prefix of 1s."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        """Return a BlipTextConfig mirroring this tester's hyper-parameters."""
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        """Instantiate the model and verify output shapes."""
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by the common tester mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    """unittest suite for TFBlipTextModel.

    The scrambled version subclassed an undefined ``snake_case__`` (restored to
    ``TFModelTesterMixin``, imported above) and gave every method/attribute the
    same placeholder name, so later definitions clobbered earlier ones.
    """

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        # Training is not exercised for this model.
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 659 | 0 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( __snake_case : list ):
if len(__snake_case ) <= 1:
return [tuple(__snake_case )]
_A = []
def generate(__snake_case : int , __snake_case : list ):
if k == 1:
res.append(tuple(arr[:] ) )
return
generate(k - 1 , __snake_case )
for i in range(k - 1 ):
if k % 2 == 0: # k is even
_A , _A = arr[k - 1], arr[i]
else: # k is odd
_A , _A = arr[k - 1], arr[0]
generate(k - 1 , __snake_case )
generate(len(__snake_case ) , __snake_case )
return res
if __name__ == "__main__":
    # Read a comma-separated list of ints and print all of its permutations.
    # Fixes the scrambled locals (`user_input`/`arr` were read but assigned to
    # a placeholder) and calls the permutation function defined above instead
    # of the undefined name `heaps`.
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    print(_SCREAMING_SNAKE_CASE(arr))
| 107 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

# The tokenizer class below reads these three module-level constants; the
# scrambled version assigned all of them (and the logger) to the same name
# `_lowercase`, clobbering each other and leaving the real names undefined.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 byte values (0-255) to unicode strings.

    Printable bytes map to themselves; the remaining (whitespace/control)
    bytes map to code points starting at 256 so every byte has a visible,
    BPE-safe representation. Named ``bytes_to_unicode`` because the tokenizer
    ``__init__`` below calls it under that name; the scrambled version also
    broke the locals (``cs``/``n``/``b`` were assigned to placeholders).
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    *word* is a sequence of symbols (variable-length strings). Named
    ``get_pairs`` because the BPE method below calls it under that name; the
    scrambled version assigned ``pairs``/``prev_char`` to placeholders.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __snake_case(PreTrainedTokenizer):
    """
    Byte-level BPE tokenizer for LED (same vocabulary/merges format as BART).
    """

    # Restored base class (`snake_case__` was undefined; PreTrainedTokenizer is
    # imported above) and the four distinct class attributes the tokenizer
    # machinery reads — the scrambled version assigned all four to one name.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self : int ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Any ,lowerCAmelCase__ : Tuple="replace" ,lowerCAmelCase__ : Optional[int]="<s>" ,lowerCAmelCase__ : Optional[int]="</s>" ,lowerCAmelCase__ : Tuple="</s>" ,lowerCAmelCase__ : int="<s>" ,lowerCAmelCase__ : Union[str, Any]="<unk>" ,lowerCAmelCase__ : str="<pad>" ,lowerCAmelCase__ : Tuple="<mask>" ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : Tuple ,) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else bos_token
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else eos_token
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else sep_token
lowerCAmelCase_ : Any = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else cls_token
lowerCAmelCase_ : Tuple = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else unk_token
lowerCAmelCase_ : Any = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ : Optional[int] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
with open(lowerCAmelCase__ ,encoding="utf-8" ) as vocab_handle:
lowerCAmelCase_ : List[str] = json.load(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = {v: k for k, v in self.encoder.items()}
lowerCAmelCase_ : Optional[int] = errors # how to handle errors in decoding
lowerCAmelCase_ : Optional[int] = bytes_to_unicode()
lowerCAmelCase_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ ,encoding="utf-8" ) as merges_handle:
lowerCAmelCase_ : List[str] = merges_handle.read().split("\n" )[1:-1]
lowerCAmelCase_ : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase_ : Union[str, Any] = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Dict = {}
lowerCAmelCase_ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase_ : Any = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def UpperCAmelCase_ ( self : Dict ) -> Dict:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase_ ( self : Dict ) -> str:
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Dict ) -> Dict:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCAmelCase_ : Union[str, Any] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : str = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
lowerCAmelCase_ : Optional[int] = min(lowerCAmelCase__ ,key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ ,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = bigram
lowerCAmelCase_ : Tuple = []
lowerCAmelCase_ : str = 0
while i < len(lowerCAmelCase__ ):
try:
lowerCAmelCase_ : Union[str, Any] = word.index(lowerCAmelCase__ ,lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase_ : List[str] = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase_ : Optional[int] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
lowerCAmelCase_ : Dict = get_pairs(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = " ".join(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = word
return word
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Any = []
for token in re.findall(self.pat ,lowerCAmelCase__ ):
lowerCAmelCase_ : Optional[int] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(" " ) )
return bpe_tokens
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase__ ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = "".join(lowerCAmelCase__ )
lowerCAmelCase_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" ,errors=self.errors )
return text
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase_ : Optional[int] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : List[str] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCAmelCase__ ,ensure_ascii=lowerCAmelCase__ ) + "\n" )
lowerCAmelCase_ : Dict = 0
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowerCAmelCase_ : List[Any] = token_index
writer.write(" ".join(lowerCAmelCase__ ) + "\n" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase_ : Union[str, Any] = [self.cls_token_id]
lowerCAmelCase_ : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ,lowerCAmelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ ,token_ids_a=lowerCAmelCase__ ,already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = [self.sep_token_id]
lowerCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : str ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = kwargs.pop("add_prefix_space" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
lowerCAmelCase_ : List[str] = " " + text
return (text, kwargs)
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Union[Dict[str, EncodedInput], BatchEncoding] ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[bool] = None ,) -> dict:
'''simple docstring'''
lowerCAmelCase_ : int = super()._pad(
encoded_inputs=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding_strategy=lowerCAmelCase__ ,pad_to_multiple_of=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,)
# Load from model defaults
if return_attention_mask is None:
lowerCAmelCase_ : List[Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCAmelCase_ : Dict = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowerCAmelCase_ : List[Any] = len(encoded_inputs["global_attention_mask"] ) != len(lowerCAmelCase__ )
if needs_to_be_padded:
lowerCAmelCase_ : Union[str, Any] = len(lowerCAmelCase__ ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCAmelCase_ : Optional[int] = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
lowerCAmelCase_ : List[Any] = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 659 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__a: Union[str, Any] = logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
    """Arguments describing a GLUE task and how its data should be preprocessed.

    Fixes in this revision: the original assigned untyped ``_lowerCamelCase``
    placeholders, which are *not* dataclass fields (dataclass fields require
    annotations), and the lower-casing method bound its result to a throwaway
    local, making the normalization a no-op. Field names are restored to the
    names the companion dataset class reads (``args.task_name`` etc.).
    """

    # Which GLUE task to train on (must be a key of `glue_processors`).
    task_name: str = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys() )} )
    # Directory containing the .tsv (or other) data files for the task.
    data_dir: str = field(
        metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
    # Maximum total input sequence length after tokenization.
    max_seq_length: int = field(
        default=128 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    # Re-tokenize even if a cached features file exists.
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    def lowerCamelCase ( self : Optional[int] ) -> int:
        """Normalize the task name to lowercase (originally ``__post_init__``)."""
        # Write back to the attribute; assigning to a local made this a no-op.
        self.task_name = self.task_name.lower()
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
    """Dataset split selector with the standard train/dev/test members.

    NOTE(review): the base class is the obfuscated name ``UpperCAmelCase``; the
    ``Split[mode]`` lookups in the dataset below suggest it should behave like
    ``enum.Enum`` — confirm. The three members also all bind the same placeholder
    name ``_lowerCamelCase``, so only the last assignment survives as written.
    """
    _lowerCamelCase = '''train'''
    _lowerCamelCase = '''dev'''
    _lowerCamelCase = '''test'''
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
    """Deprecated PyTorch dataset for GLUE: tokenizes task examples and caches the
    resulting features on disk (guarded by a file lock for distributed training).

    NOTE(review): throughout this class, computed values are bound to the
    placeholder name ``_UpperCAmelCase`` while later statements read the original
    names (``self.args``, ``self.processor``, ``mode``, ``cached_features_file``,
    ``label_list``, ``start``, ``examples`` ...) — the bindings look wrong as
    written; confirm against the upstream ``GlueDataset`` implementation.
    """

    # Originally typed fields: args / output_mode / features (placeholders here).
    _lowerCamelCase = 42
    _lowerCamelCase = 42
    _lowerCamelCase = 42
    def __init__( self : Any , lowerCamelCase : GlueDataTrainingArguments , lowerCamelCase : PreTrainedTokenizerBase , lowerCamelCase : Optional[int] = None , lowerCamelCase : Union[str, Split] = Split.train , lowerCamelCase : Optional[str] = None , ) -> int:
        """Load tokenized features from the cache file if present (and not
        overwritten), otherwise tokenize the requested split and cache it."""
        warnings.warn(
            """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
            """library. You can have a look at this example script for pointers: """
            """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , lowerCamelCase , )
        _UpperCAmelCase = args
        _UpperCAmelCase = glue_processors[args.task_name]()
        _UpperCAmelCase = glue_output_modes[args.task_name]
        if isinstance(lowerCamelCase , lowerCamelCase ):
            try:
                _UpperCAmelCase = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""" )
        # Load data features from cache or dataset file
        _UpperCAmelCase = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
        _UpperCAmelCase = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            _UpperCAmelCase , _UpperCAmelCase = label_list[2], label_list[1]
        _UpperCAmelCase = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        _UpperCAmelCase = cached_features_file + """.lock"""
        with FileLock(lowerCamelCase ):
            if os.path.exists(lowerCamelCase ) and not args.overwrite_cache:
                _UpperCAmelCase = time.time()
                _UpperCAmelCase = torch.load(lowerCamelCase )
                logger.info(
                    f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
            else:
                logger.info(f"""Creating features from dataset file at {args.data_dir}""" )
                if mode == Split.dev:
                    _UpperCAmelCase = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    _UpperCAmelCase = self.processor.get_test_examples(args.data_dir )
                else:
                    _UpperCAmelCase = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    _UpperCAmelCase = examples[:limit_length]
                _UpperCAmelCase = glue_convert_examples_to_features(
                    lowerCamelCase , lowerCamelCase , max_length=args.max_seq_length , label_list=lowerCamelCase , output_mode=self.output_mode , )
                _UpperCAmelCase = time.time()
                torch.save(self.features , lowerCamelCase )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
    def __len__( self : Union[str, Any] ) -> Any:
        """Number of tokenized examples in this split."""
        return len(self.features )
    def __getitem__( self : str , lowerCamelCase : Any ) -> InputFeatures:
        """Return the tokenized features of a single example."""
        return self.features[i]
    def lowerCamelCase ( self : int ) -> List[Any]:
        """Return the list of label names for this task."""
        return self.label_list
import os
_lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 100, '''D''': 500, '''M''': 1000}
def UpperCamelCase ( numerals):
    """Parse a Roman-numeral string into its integer value.

    A symbol smaller than its right neighbour is subtracted (e.g. the I in IV),
    otherwise added. Returns 0 for an empty string.

    Fixes in this revision: the original bound locals to the placeholder
    ``lowerCAmelCase_`` while reading the unbound names ``index``/``total_value``/
    ``current_value``/``next_value``, and referenced ``SYMBOLS`` which the module
    never defines (the table is bound as ``_lowercase``); the table is now local.
    """
    symbols = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    if not numerals:
        return 0
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = symbols[numerals[index]]
        next_value = symbols[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    # The last symbol is always added.
    total_value += symbols[numerals[index]]
    return total_value
def UpperCamelCase ( num):
    """Render an integer (1..3999) as a minimal Roman-numeral string.

    Handles the subtractive forms (CM/CD, XC/XL, IX/IV) explicitly for each
    decimal digit.

    Fixes in this revision: the original bound locals to the placeholder
    ``lowerCAmelCase_`` while reading the unbound names ``numerals``/``m_count``/
    ``c_count``/``x_count``.
    """
    numerals = ""
    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000
    c_count = num // 1_00
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 1_00
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
def UpperCamelCase ( roman_numerals_filename = "/p089_roman.txt"):
    """Project Euler 89: total characters saved by rewriting each numeral minimally.

    Reads one Roman numeral per line from the given file (relative to this
    module's directory), parses it, regenerates the minimal form, and sums the
    length savings.

    Fixes in this revision: the original bound locals to placeholders while
    reading the unbound names ``savings``/``lines``, and named its parameter
    ``snake_case__`` while the body read ``roman_numerals_filename``.

    NOTE(review): this module binds all three functions to the same name
    ``UpperCamelCase``, so the ``parse_roman_numerals``/``generate_roman_numerals``
    helpers called here do not exist under those names — confirm the intended
    de-obfuscated names file-wide.
    """
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        value = parse_roman_numerals(original)
        minimal = generate_roman_numerals(value)
        savings += len(original) - len(minimal)
    return savings
if __name__ == "__main__":
print(f"{solution() = }")
| 659 | 0 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __a ( unittest.TestCase ):
    """Runs accelerate's bundled ``test_metrics`` script on CPU, single GPU and
    multi-GPU (via ``debug_launcher`` or ``torchrun``).

    NOTE(review): results are bound to the placeholder ``__SCREAMING_SNAKE_CASE``
    while later lines read the original names (``mod_file``, ``self.test_metrics``,
    ``self.test_file_path``) — the bindings look wrong as written; confirm against
    the upstream accelerate test.
    """
    def UpperCAmelCase__ ( self : int ):
        """Locate scripts/external_deps/test_metrics.py and import it (originally setUp)."""
        __SCREAMING_SNAKE_CASE = inspect.getfile(accelerate.test_utils )
        __SCREAMING_SNAKE_CASE = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps""", """test_metrics.py"""] )
        from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
        __SCREAMING_SNAKE_CASE = test_metrics
    @require_cpu
    def UpperCAmelCase__ ( self : Optional[int] ):
        """Run the metrics script on CPU with a single process."""
        debug_launcher(self.test_metrics.main ,num_processes=1 )
    @require_cpu
    def UpperCAmelCase__ ( self : List[str] ):
        """Run the metrics script on CPU with the default number of processes."""
        debug_launcher(self.test_metrics.main )
    @require_single_gpu
    def UpperCAmelCase__ ( self : Dict ):
        """Run the metrics script in-process on a single GPU."""
        self.test_metrics.main()
    @require_multi_gpu
    def UpperCAmelCase__ ( self : List[Any] ):
        """Launch the metrics script across all visible GPUs with torchrun."""
        print(f"""Found {torch.cuda.device_count()} devices.""" )
        __SCREAMING_SNAKE_CASE = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(lowerCamelCase ,env=os.environ.copy() )
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCamelCase ( ):
    """CLI entry point: parse ``TensorFlowBenchmarkArguments`` from argv and run the benchmark.

    When argument parsing fails because of deprecated ``--no_*`` flags, rebuilds a
    helpful error message pointing users at the ``--no-*`` spelling.

    Fixes in this revision: the original passed the undefined placeholder
    ``snake_case__`` everywhere, constructed the benchmark *before* the arguments
    were validated, and never bound ``full_error_msg``/``wrong_args`` coherently.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval() on the parser's error text is fragile even for
        # trusted local input; consider ast.literal_eval instead.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
main()
| 659 | 0 |
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def __UpperCamelCase ( _A = "" ):
    """Scrape the IMDb Top-250 chart and return {movie title: rating} as a dict.

    ``_A`` optionally overrides the chart URL (empty string -> default chart).

    Fixes in this revision: the original bound results to the placeholder
    ``lowerCAmelCase_`` while reading the unbound names ``url`` and the two
    cell lists.
    """
    url = _A or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    title_cells = soup.find_all('''td''' , attrs='''titleColumn''' )
    rating_cells = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(title_cells , rating_cells )
    }
def __UpperCamelCase ( _A = "IMDb_Top_250_Movies.csv" ):
    """Fetch the IMDb Top-250 ratings and write them to the CSV file named by ``_A``.

    Fixes in this revision: the original bound results to placeholders while
    reading the unbound names ``movies``/``writer`` and opened the undefined
    ``snake_case__`` instead of the filename parameter.

    NOTE(review): ``get_imdb_top_aaa_movies`` is not defined in this module (the
    fetcher above is also named ``__UpperCamelCase``) — confirm the intended
    de-obfuscated names file-wide.
    """
    movies = get_imdb_top_aaa_movies()
    with open(_A , '''w''' , newline='''''' ) as out_file:
        writer = csv.writer(out_file )
        writer.writerow(['''Movie title''', '''IMDb rating'''] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
write_movies()
| 431 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_lowercase = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
_lowercase = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def UpperCamelCase ( ):
    """Bootstrap-aggregated and per-example ROUGE should agree on the mean rouge2 F-measure.

    NOTE(review): every positional argument is the undefined placeholder
    ``snake_case__`` and results are bound to placeholders while the final assert
    reads ``no_aggregation``/``no_aggregation_just_ra`` — confirm against the
    upstream rouge test.
    """
    lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , bootstrap_aggregation=snake_case__ , rouge_keys=["rouge2", "rougeL"])
    assert isinstance(snake_case__ , snake_case__)
    lowerCAmelCase_ : str = calculate_rouge(snake_case__ , snake_case__ , bootstrap_aggregation=snake_case__ , rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["rouge2"]).fmeasure.mean()
    )
def UpperCamelCase ( ):
    """rougeLsum with newline separation should score strictly higher than without.

    NOTE(review): the placeholder bindings/arguments (``snake_case__``,
    ``lowerCAmelCase_`` vs the read names ``k``/``score``/``score_no_sep``) look
    wrong as written — confirm against the upstream rouge test.
    """
    lowerCAmelCase_ : str = "rougeLsum"
    lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=[k])[k]
    lowerCAmelCase_ : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=[k])[k]
    assert score > score_no_sep
def UpperCamelCase ( ):
    """Newline separation must not change rouge1/rouge2/rougeL scores.

    NOTE(review): placeholder bindings vs the read names ``score_sep``/
    ``score_no_sep`` look wrong as written — confirm against the upstream test.
    """
    lowerCAmelCase_ : int = ["rouge1", "rouge2", "rougeL"]
    lowerCAmelCase_ : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=snake_case__)
    lowerCAmelCase_ : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=snake_case__)
    assert score_sep == score_no_sep
def UpperCamelCase ( ):
    """Scores on single-sentence inputs should not depend on the newline_sep flag.

    NOTE(review): the placeholder ``snake_case__`` arguments presumably stand for
    the two local fixture lists — confirm against the upstream test.
    """
    lowerCAmelCase_ : List[str] = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    lowerCAmelCase_ : Dict = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__) == calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__)
def UpperCamelCase ( ):
    """rougeLsum computed without sentence newlines should beat the newline-separated score
    on this <n>-marked fixture.

    NOTE(review): the placeholder ``snake_case__`` arguments and the read names
    ``new_score``/``prev_score`` do not match the placeholder bindings — confirm
    against the upstream test.
    """
    lowerCAmelCase_ : Optional[int] = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    lowerCAmelCase_ : Any = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , rouge_keys=["rougeLsum"] , newline_sep=snake_case__)["rougeLsum"]
    lowerCAmelCase_ : Any = calculate_rouge(snake_case__ , snake_case__ , rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def UpperCamelCase ( ):
    """calculate_rouge_path on the WMT en-ro test files should return dicts in both
    aggregated and non-aggregated modes.

    NOTE(review): the ``isinstance(snake_case__, snake_case__)`` checks use the
    undefined placeholder — confirm the intended types against the upstream test.
    """
    lowerCAmelCase_ : int = Path("examples/seq2seq/test_data/wmt_en_ro")
    lowerCAmelCase_ : Dict = calculate_rouge_path(data_dir.joinpath("test.source") , data_dir.joinpath("test.target"))
    assert isinstance(snake_case__ , snake_case__)
    lowerCAmelCase_ : Any = calculate_rouge_path(
        data_dir.joinpath("test.source") , data_dir.joinpath("test.target") , bootstrap_aggregation=snake_case__)
    assert isinstance(snake_case__ , snake_case__)
| 659 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
UpperCamelCase_ = list[list[float | int]]
def _lowerCAmelCase ( matrix: list[list[float]] , vector: list[list[float]] ) -> list[list[float]]:
    """Solve ``matrix @ x = vector`` by Gaussian elimination with column pivoting.

    ``matrix`` is square (size x size) and ``vector`` is a size x 1 column; the
    result is a size x 1 column of solutions, rounded to 10 decimal places to
    suppress float noise like 2.000000000000004.

    Fixes in this revision: the original bound every intermediate to the
    placeholder ``lowercase`` while reading the unbound names ``size``/
    ``augmented``/``pivot_row``/``ratio``, and its row swap discarded one side of
    the exchange.
    """
    size = len(matrix )
    augmented = [[0 for _ in range(size + 1 )] for _ in range(size )]
    # Build the augmented matrix [matrix | vector].
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # Pivot on the row with the largest absolute value in this column.
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[1]
        if augmented[pivot_row][col] == 0:
            # Singular column: move on without advancing the pivot row.
            col += 1
            continue
        augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        # Eliminate the entries below the pivot.
        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # Back substitution: clear the entries above each diagonal element.
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def _lowerCAmelCase ( y_points: list[int] ) -> Callable[[int], int]:
    """Return the polynomial of degree ``len(y_points) - 1`` through (1, y[0]), (2, y[1]), ...

    Builds the Vandermonde system, solves it with the Gaussian-elimination helper
    above, and returns a callable that evaluates the fitted polynomial with
    integer-rounded coefficients.

    Fixes in this revision: the original bound every intermediate to the
    placeholder ``lowercase`` while reading the unbound names ``size``/``matrix``/
    ``vector``/``coeffs``, and named its parameter ``__magic_name__`` while the
    body read ``y_points``.
    """
    size = len(y_points )
    matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector = [[0] for _ in range(size )]
    for x_val, y_val in enumerate(y_points ):
        for col in range(size ):
            # Vandermonde row for x = x_val + 1: x^(size-1), ..., x, 1.
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )  # sibling Gaussian-elimination solver
    def interpolated_func(var: int ) -> int:
        """Evaluate the fitted polynomial at ``var`` using rounded coefficients."""
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def _lowerCAmelCase ( variable: int ) -> int:
    """Evaluate u(n) = 1 - n + n^2 - ... + n^10, the Project Euler 101 generating function.

    Fixes in this revision: the original named its parameter ``__magic_name__``
    while the body read the unbound name ``variable``.
    """
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def _lowerCAmelCase ( func = question_function , order: int = 10 ) -> int:
    """Project Euler 101: sum the first incorrect terms (FITs) of the optimum polynomials.

    For each prefix of the first ``order`` sequence values, fits the optimum
    polynomial and adds its value at the first index where it diverges from the
    true sequence.

    Fixes in this revision: the original declared the same placeholder parameter
    name twice (a SyntaxError) and bound locals to placeholders while reading the
    unbound names ``data_points``/``polynomials``/``ret``/``x_val``.

    NOTE(review): ``question_function`` and ``interpolate`` are the intended names
    of the sibling helpers, which this module binds as ``_lowerCAmelCase`` — the
    original referenced the same names; confirm the de-obfuscated names file-wide.
    """
    data_points = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret = 0
    for poly in polynomials:
        x_val = 1
        # Walk forward until the fitted polynomial first disagrees with the sequence.
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
| 92 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( snake_case__ , unittest.TestCase ):
    """Tokenizer tests for LED (slow and fast): batching, padding, special tokens,
    text targets, and the LED-specific ``global_attention_mask`` padding.

    NOTE(review): throughout this class, locals are bound to the placeholder
    ``lowerCAmelCase_`` while later statements read the original names
    (``vocab_tokens``, ``merges``, ``batch``, ``targets``, ``inputs`` ...), and
    several calls pass the undefined ``lowerCAmelCase__`` — confirm against the
    upstream LED tokenizer tests before relying on this file.
    """
    UpperCamelCase_ = LEDTokenizer
    UpperCamelCase_ = LEDTokenizerFast
    UpperCamelCase_ = True
    def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
        """Write a tiny byte-level vocab.json / merges.txt fixture into the temp dir (setUp)."""
        super().setUp()
        lowerCAmelCase_ : Union[str, Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        lowerCAmelCase_ : Tuple = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
        lowerCAmelCase_ : int = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowerCAmelCase_ : Union[str, Any] = {"unk_token": "<unk>"}
        lowerCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
        lowerCAmelCase_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
        with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
            fp.write("\n".join(lowerCAmelCase__ ) )
    def UpperCAmelCase_ ( self : List[Any] ,**lowerCAmelCase__ : int ) -> Tuple:
        """Instantiate a slow tokenizer from the fixture directory."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
    def UpperCAmelCase_ ( self : Union[str, Any] ,**lowerCAmelCase__ : Optional[int] ) -> List[Any]:
        """Instantiate a fast tokenizer from the fixture directory."""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
    def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : int ) -> List[str]:
        """Return a simple (input text, expected output text) pair."""
        return "lower newer", "lower newer"
    @cached_property
    def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
        """Pretrained slow LED tokenizer (allenai/led-base-16384)."""
        return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
    @cached_property
    def UpperCAmelCase_ ( self : List[str] ) -> Dict:
        """Pretrained fast LED tokenizer (allenai/led-base-16384)."""
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
    @require_torch
    def UpperCAmelCase_ ( self : int ) -> Optional[int]:
        """Batched encoding should produce the expected ids, shapes and attention mask."""
        lowerCAmelCase_ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        lowerCAmelCase_ : int = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : Any = tokenizer(lowerCAmelCase__ ,max_length=len(lowerCAmelCase__ ) ,padding=lowerCAmelCase__ ,return_tensors="pt" )
            self.assertIsInstance(lowerCAmelCase__ ,lowerCAmelCase__ )
            self.assertEqual((2, 9) ,batch.input_ids.shape )
            self.assertEqual((2, 9) ,batch.attention_mask.shape )
            lowerCAmelCase_ : int = batch.input_ids.tolist()[0]
            self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
    @require_torch
    def UpperCAmelCase_ ( self : Dict ) -> Any:
        """Encoding without targets must not emit labels or decoder masks."""
        lowerCAmelCase_ : int = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,return_tensors="pt" )
            self.assertIn("input_ids" ,lowerCAmelCase__ )
            self.assertIn("attention_mask" ,lowerCAmelCase__ )
            self.assertNotIn("labels" ,lowerCAmelCase__ )
            self.assertNotIn("decoder_attention_mask" ,lowerCAmelCase__ )
    @require_torch
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
        """max_length padding of text targets should yield fixed-width target ids."""
        lowerCAmelCase_ : int = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : Optional[int] = tokenizer(text_target=lowerCAmelCase__ ,max_length=32 ,padding="max_length" ,return_tensors="pt" )
            self.assertEqual(32 ,targets["input_ids"].shape[1] )
    @require_torch
    def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
        """Long inputs should be truncated to the model maximum (5122)."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : Tuple = tokenizer(
                ["I am a small frog" * 10_24, "I am a small frog"] ,padding=lowerCAmelCase__ ,truncation=lowerCAmelCase__ ,return_tensors="pt" )
            self.assertIsInstance(lowerCAmelCase__ ,lowerCAmelCase__ )
            self.assertEqual(batch.input_ids.shape ,(2, 51_22) )
    @require_torch
    def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
        """Both inputs and text targets should be wrapped in bos/eos special tokens."""
        lowerCAmelCase_ : Tuple = ["A long paragraph for summarization."]
        lowerCAmelCase_ : Dict = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ ,return_tensors="pt" )
            lowerCAmelCase_ : Optional[Any] = tokenizer(text_target=lowerCAmelCase__ ,return_tensors="pt" )
            lowerCAmelCase_ : List[str] = inputs["input_ids"]
            lowerCAmelCase_ : Any = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    @require_torch
    def UpperCAmelCase_ ( self : str ) -> Tuple:
        """tokenizer.pad should extend global_attention_mask to the padded length."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowerCAmelCase_ : str = ["Summary of the text.", "Another summary."]
            lowerCAmelCase_ : str = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            lowerCAmelCase_ : List[Any] = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ )
            lowerCAmelCase_ : Optional[int] = [[0] * len(lowerCAmelCase__ ) for x in encoded_output["input_ids"]]
            lowerCAmelCase_ : Optional[int] = tokenizer.pad(lowerCAmelCase__ )
            self.assertSequenceEqual(outputs["global_attention_mask"] ,lowerCAmelCase__ )
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
        """Intentionally skipped in this suite."""
        pass
    def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
        """Slow and fast tokenizers must agree on mask-token handling and special tokens."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCAmelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
                lowerCAmelCase_ : Tuple = self.tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
                lowerCAmelCase_ : Dict = "A, <mask> AllenNLP sentence."
                lowerCAmelCase_ : Tuple = tokenizer_r.encode_plus(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,return_token_type_ids=lowerCAmelCase__ )
                lowerCAmelCase_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,return_token_type_ids=lowerCAmelCase__ )
                self.assertEqual(sum(tokens_r["token_type_ids"] ) ,sum(tokens_p["token_type_ids"] ) )
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) ,sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) ,)
                lowerCAmelCase_ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                lowerCAmelCase_ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                self.assertSequenceEqual(tokens_p["input_ids"] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] ,[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(
                    lowerCAmelCase__ ,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    lowerCAmelCase__ ,["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 659 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    """Configuration for GPT-BigCode models.

    NOTE(review): restored from obfuscated code in which every ``__init__``
    parameter shared one name (a SyntaxError) and the base class was an
    undefined symbol. Parameter names are recovered from the attribute
    assignments in the body; ``fpaa`` attributes are restored to ``fp32``.
    """

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        # Inner (MLP) dimension; None means the model default (4 * n_embd).
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
from ....configuration_utils import PretrainedConfig
from ....utils import logging


# The obfuscation bound both the logger and the archive map to the same name;
# restored to distinct, conventional names.
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    """Configuration for VAN (Visual Attention Network) models.

    NOTE(review): restored from obfuscated code in which every ``__init__``
    parameter shared one name (a SyntaxError) and the base class was undefined.
    Parameter names and defaults are recovered from the attribute assignments.
    List defaults follow the upstream HF convention for this config.
    """

    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 659 | 0 |
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

# NOTE(review): `transformers` exports AutoModelForSeq2SeqLM; the obfuscated
# `AutoModelForSeqaSeqLM` does not exist and would raise ImportError.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
# Bound to `logger` because the functions below call `logger.info(...)`.
logger = logging.get_logger(__name__)
def copy_layers(src_layers: "nn.ModuleList", dest_layers: "nn.ModuleList", layers_to_copy) -> None:
    """Copy the teacher layers at indices ``layers_to_copy`` from ``src_layers``
    into ``dest_layers`` (which must have exactly ``len(layers_to_copy)`` layers).

    Raises:
        AssertionError: if the number of destination layers does not match.
    """
    # Materialize the selected teacher layers, then load their weights in order.
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
# Named LAYERS_TO_COPY because pick_layers_to_copy() below indexes it by that name.
LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
# Named LAYERS_TO_SUPERVISE because get_layers_to_supervise() below indexes it.
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    """Return the teacher-layer indices to copy for a student of ``n_student`` layers.

    Falls back to the first ``n_student`` layers (with a warning) when no
    hardcoded mapping exists for this (teacher, student) pair.
    """
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher):
    """Used or the --supervise_forward kwarg: which teacher layers supervise the student.

    Raises:
        ValueError: if the student has more layers than the teacher.
    """
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        # A single-layer student is supervised by the teacher's last layer.
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher,
    save_path="student",
    e=None,
    d=None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
):
    """Build a student seq2seq model with ``e`` encoder / ``d`` decoder layers
    initialized from alternating layers of ``teacher``, and save it to
    ``save_path``.

    NOTE(review): restored from obfuscated code whose parameters all shared one
    name and whose tuple unpackings were collapsed; local names are recovered
    from their usages.

    Args:
        teacher: a model or a checkpoint name/path.
        save_path: where the student (and tokenizer, if loaded) is saved.
        e, d: desired encoder/decoder layer counts; None keeps the teacher's.
        copy_first_teacher_layers: copy the first N layers instead of alternating.
        e_layers_to_copy, d_layers_to_copy: explicit layer indices (optional).
        **extra_config_kwargs: extra config overrides for the student.

    Returns:
        (student, e_layers_to_copy, d_layers_to_copy)
    """
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
| 391 |
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return n-choose-k, the number of ways to pick k items from n.

    Named ``combinations`` because the demo code below calls it by that name;
    the obfuscated version also gave both parameters the same name.

    Raises:
        ValueError: if k > n or k < 0 (factorial of a negative is undefined).
    """
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
| 659 | 0 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
# Bound to `client` because Message and error_out() call `client.chat_postMessage`.
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def _snake_case ( _SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase = test_results.split(""" """ )
lowerCAmelCase = 0
lowerCAmelCase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
lowerCAmelCase = expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(snake_case__ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def extract_first_line_failure(failures_short_lines: str) -> dict:
    """Map each failing doctest file to the first line of its error message.

    Named per the call site at the bottom of this module.
    """
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            # Header line of a doctest failure: the third token is the file.
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            # First non-line-numbered line after the header is the error text.
            failures[file] = line
            in_error = False
    return failures
class Message:
    """Builds and posts a Slack report summarising doc-test results.

    NOTE(review): restored from obfuscated code (duplicate ``__init__``
    parameters, all properties sharing one name). Names recovered from usages
    and from the call sites at the bottom of the module.
    """

    def __init__(self, title: str, doc_test_results: dict):
        self.title = title
        # "time_spent" looks like "h:mm:ss, ..." -- keep only the first entry.
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        # Set by post(); post_reply() refuses to run before a post was made.
        self.thread_ts = None

    @property
    def time(self) -> str:
        """Total time spent, formatted as e.g. '1h2m3s'."""
        time_spent = [self._time_spent]
        total_secs = 0
        for time_str in time_spent:
            time_parts = time_str.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> dict:
        line_length = 40
        # Read via self rather than the module-level global of the same name:
        # __init__ stores the same dict, so behavior is unchanged but the
        # hidden-global dependency is gone.
        category_failures = {k: v["failed"] for k, v in self.doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        """The full Slack blocks payload, serialized as a JSON string."""
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)

    @staticmethod
    def error_out() -> None:
        """Post a generic 'tests could not run' message to Slack."""
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]
        print("Sending the following payload")
        # `payload` is already a list -- the obfuscated code passed it through
        # json.loads, which raises TypeError on non-strings.
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self) -> None:
        """Post the main report and remember the thread timestamp for replies."""
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text) -> list:
        """Build the Slack blocks for one job's failure reply."""
        failures_text = ""
        for key, value in failures.items():
            # Slack rejects very long fields; truncate each error message.
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self) -> None:
        """Post one threaded reply per failing job category."""
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )
                # Avoid hammering the Slack API.
                time.sleep(1)
def get_job_links() -> dict:
    """Return {job name: html url} for all jobs of the current GitHub Action run.

    Named per the call site at the bottom of this module. Pages through the
    GitHub API (100 jobs per page); returns {} on any error.
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        # Best effort: the report is still posted without job links.
        print("Unknown error, could not fetch links.", e)
        return {}
def retrieve_artifact(name: str) -> dict:
    """Read every file in directory ``name`` into {filename-stem: contents}.

    Returns {} when the directory does not exist. The obfuscated version read
    each file but never stored it in the dict -- restored here.

    Raises:
        ValueError: if a file is not valid UTF-8.
    """
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
def retrieve_available_artifacts() -> dict:
    """Return {artifact name: Artifact} for every directory in the cwd.

    Each subdirectory of the working directory is treated as one downloaded
    CI artifact.
    """

    class Artifact:
        """A named artifact and the path(s) it was found at."""

        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
UpperCAmelCase = get_job_links()
UpperCAmelCase = retrieve_available_artifacts()
UpperCAmelCase = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
UpperCAmelCase = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
UpperCAmelCase = github_actions_job_links.get('run_doctests')
UpperCAmelCase = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
UpperCAmelCase = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = handle_test_results(artifact['stats'])
UpperCAmelCase = failed
UpperCAmelCase = success
UpperCAmelCase = time_spent[1:-1] + ', '
UpperCAmelCase = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
UpperCAmelCase = line.replace('FAILED ', '')
UpperCAmelCase = line.split()[0].replace('\n', '')
if "::" in line:
UpperCAmelCase , UpperCAmelCase = line.split('::')
else:
UpperCAmelCase , UpperCAmelCase = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
UpperCAmelCase = docs[file_regex]
doc_test_results[category]["failed"].append(test)
UpperCAmelCase = all_failures[test] if test in all_failures else 'N/A'
UpperCAmelCase = failure
break
UpperCAmelCase = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply() | 433 |
import argparse
import json
from tqdm import tqdm
def main() -> None:
    """Parse raw DPR training data into an evaluation set and a gold-data file.

    Named per the ``main()`` call in the guard below; the obfuscated version
    never bound ``args``, ``dpr_records``, ``question`` or ``contexts``.
    """
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
| 659 | 0 |
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str) -> None:
    """Convert an original latent-diffusion checkpoint into a diffusers LDMPipeline.

    Named per the call in the guard below; the obfuscated version gave all
    three parameters the same name.
    """
    # NOTE(review): the module-level `import OmegaConf` in this file cannot
    # resolve (the class lives in the `omegaconf` package) -- import locally.
    from omegaconf import OmegaConf

    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        # Original LDM checkpoints do not clip samples during DDIM steps.
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
from collections.abc import Sequence
def max_subsequence_sum(nums=None) -> int:
    """Return the maximum sum over all (possibly non-contiguous) subsequences.

    For all-negative input this is the largest single element; otherwise it is
    the sum of the positive elements. The recurrence keeps the best of:
    the previous best, extending it with nums[i], or starting fresh at nums[i].

    Raises:
        ValueError: if ``nums`` is None or empty.
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
| 659 | 0 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
__A = TypeVar("""T""")
__A = Union[List[T], Tuple[T, ...]]
__A = Union[T, List[T], Dict[str, T]]
__A = Union[str, bytes, os.PathLike]
| 93 |
from typing import TYPE_CHECKING

from ....utils import _LazyModule

# Named _import_structure because _LazyModule below consumes it by that name.
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    # The lazy module must replace THIS module in sys.modules; the obfuscated
    # version assigned it to a throwaway name, so laziness never took effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 659 | 0 |
"""simple docstring"""
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    """Tests for DPMSolverSDEScheduler.

    NOTE(review): restored from obfuscated code in which every method shared
    one name (so only the last was ever collected) and the base class was an
    undefined symbol; class attributes are named as the bodies read them
    (``self.scheduler_classes``, ``self.num_inference_steps``).
    """

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler kwargs, overridable per test."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Reference values differ per backend (mps / cuda / cpu).
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# NOTE(review): obfuscation collapsed several distinct module constants into the
# single name `_lowercase`, so each assignment below clobbers the previous one,
# while later code reads the original names (DIFFUSERS_PATH, spec, etc.) that
# are no longer defined — confirm against the original utility before running.
_lowercase = '''src/diffusers'''
_lowercase = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
_lowercase = importlib.util.spec_from_file_location(
    '''diffusers''',
    os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
    submodule_search_locations=[DIFFUSERS_PATH],
)
_lowercase = spec.loader.load_module()
def UpperCamelCase(line, indent):
    """Return True if `line` is still part of the current code block.

    A line continues the block when it starts with the block's `indent`, is
    empty (length <= 1, i.e. at most a newline), or is a closing parenthesis
    of a multi-line signature such as `) -> int:` or `):`.

    Fix: the obfuscated original declared both parameters as `snake_case__`,
    which is a SyntaxError (duplicate argument); callers could therefore only
    ever have used positional arguments, so restoring distinct names is safe.
    """
    return line.startswith(indent) or len(line) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def UpperCamelCase ( snake_case__):
    """Locate the source of a dotted `object_name` inside the diffusers tree and
    return its code as a single string.

    NOTE(review): obfuscation damage — locals were collapsed to `lowerCAmelCase_`
    while later lines read the original names (`parts`, `i`, `module`, `lines`,
    `indent`, `line_index`, `start_index`), and the repo-root constant became the
    parameter name `snake_case__`. As written this raises NameError; the notes
    below describe the evident intent, to be confirmed against the original
    utils/check_copies.py.
    """
    lowerCAmelCase_ : Tuple = object_name.split(".")
    lowerCAmelCase_ : Union[str, Any] = 0
    # First let's find the module where our object lives.
    lowerCAmelCase_ : Union[str, Any] = parts[i]
    # Walk down the dotted path until a `<module>.py` file exists on disk.
    while i < len(snake_case__) and not os.path.isfile(os.path.join(snake_case__ , F'''{module}.py''')):
        i += 1
        if i < len(snake_case__):
            lowerCAmelCase_ : Dict = os.path.join(snake_case__ , parts[i])
    if i >= len(snake_case__):
        raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''')
    with open(os.path.join(snake_case__ , F'''{module}.py''') , "r" , encoding="utf-8" , newline="\n") as f:
        lowerCAmelCase_ : Optional[Any] = f.readlines()
    # Now let's find the class / func in the code!
    lowerCAmelCase_ : Union[str, Any] = ""
    lowerCAmelCase_ : int = 0
    # Each remaining dotted component is searched one nesting level deeper.
    for name in parts[i + 1 :]:
        while (
            line_index < len(snake_case__) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index]) is None
        ):
            line_index += 1
        indent += " "
        line_index += 1
    if line_index >= len(snake_case__):
        raise ValueError(F''' {object_name} does not match any function or class in {module}.''')
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    lowerCAmelCase_ : Union[str, Any] = line_index
    while line_index < len(snake_case__) and _should_continue(lines[line_index] , snake_case__):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    lowerCAmelCase_ : List[str] = lines[start_index:line_index]
    return "".join(snake_case__)
# Matches `# Copied from diffusers.<path.to.object>` comments: captures the
# leading indent, the dotted object path, and any trailing `with ...` clause.
_lowercase = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
# Matches `old->new` replacement directives inside a `with ...` clause.
_lowercase = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
# Matches `<FILL ...>` placeholders.
# NOTE(review): all three patterns are bound to the same collapsed name
# `_lowercase`, while code below reads `_re_copy_warning` / `_re_replace_pattern`
# — an artifact of the renaming; confirm the original names.
_lowercase = re.compile(r'''<FILL\s+[^>]*>''')
def UpperCamelCase(snake_case__):
    """Return the leading whitespace of the first non-empty line of `snake_case__`.

    Empty string when the code has no non-empty line.

    Fix: the obfuscated body read the undefined name `code` instead of the
    function's parameter (NameError at runtime); the parameter is now used.
    NOTE: a first non-empty line consisting only of whitespace would make the
    regex return None and raise AttributeError — unchanged from the original.
    """
    lines = snake_case__.split("\n")
    idx = 0
    # Skip fully empty leading lines.
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(R"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def UpperCamelCase(snake_case__):
    """Reformat a code snippet with black and restyle its docstrings.

    Indented snippets (class/method bodies) are wrapped in a dummy `class Bla:`
    so black accepts them, then the wrapper line is stripped from the result.

    Fixes to obfuscation damage: `black.TargetVersion.PYaa` is not a valid
    attribute (the canonical check_copies utility uses PY37), `preview` was
    being passed the code string instead of True, and `format_str` received the
    snippet in both argument slots.
    """
    has_indent = len(get_indent(snake_case__)) > 0
    code = f"class Bla:\n{snake_case__}" if has_indent else snake_case__
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=1_19, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def UpperCamelCase ( snake_case__ , snake_case__=False):
    """Check that `# Copied from` blocks in a file match their source; return a
    list of [object_name, start_index] diffs, optionally rewriting the file.

    NOTE(review): obfuscation damage — assignment targets were collapsed to
    `lowerCAmelCase_` while later lines read the original names (`lines`,
    `diffs`, `line_index`, `object_name`, `replace_pattern`, `theoretical_code`,
    `observed_code`, ...), and most call arguments became the undefined
    `snake_case__`. As written this raises NameError; comments describe the
    evident intent, to be confirmed against the original utils/check_copies.py.
    """
    with open(snake_case__ , "r" , encoding="utf-8" , newline="\n") as f:
        lowerCAmelCase_ : Tuple = f.readlines()
    lowerCAmelCase_ : Tuple = []
    lowerCAmelCase_ : Union[str, Any] = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(snake_case__):
        lowerCAmelCase_ : Optional[int] = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = search.groups()
        lowerCAmelCase_ : int = find_code_in_diffusers(snake_case__)
        lowerCAmelCase_ : Dict = get_indent(snake_case__)
        # Skip past the comment line (two lines when indents differ).
        lowerCAmelCase_ : Union[str, Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
        lowerCAmelCase_ : str = theoretical_indent
        lowerCAmelCase_ : Union[str, Any] = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        lowerCAmelCase_ : Optional[int] = True
        while line_index < len(snake_case__) and should_continue:
            line_index += 1
            if line_index >= len(snake_case__):
                break
            lowerCAmelCase_ : Dict = lines[line_index]
            lowerCAmelCase_ : List[str] = _should_continue(snake_case__ , snake_case__) and re.search(F'''^{indent}# End copy''' , snake_case__) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        lowerCAmelCase_ : Dict = lines[start_index:line_index]
        lowerCAmelCase_ : Optional[int] = "".join(snake_case__)
        # Remove any nested `Copied from` comments to avoid circular copies
        lowerCAmelCase_ : List[Any] = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(snake_case__) is None]
        lowerCAmelCase_ : Optional[Any] = "\n".join(snake_case__)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(snake_case__) > 0:
            lowerCAmelCase_ : List[str] = replace_pattern.replace("with" , "").split(",")
            lowerCAmelCase_ : Tuple = [_re_replace_pattern.search(snake_case__) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = pattern.groups()
                lowerCAmelCase_ : int = re.sub(snake_case__ , snake_case__ , snake_case__)
                # `all-casing` also applies the replacement in lower/upper case.
                if option.strip() == "all-casing":
                    lowerCAmelCase_ : List[str] = re.sub(obja.lower() , obja.lower() , snake_case__)
                    lowerCAmelCase_ : int = re.sub(obja.upper() , obja.upper() , snake_case__)
        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        lowerCAmelCase_ : List[Any] = blackify(lines[start_index - 1] + theoretical_code)
        lowerCAmelCase_ : Union[str, Any] = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lowerCAmelCase_ : List[Any] = lines[:start_index] + [theoretical_code] + lines[line_index:]
                lowerCAmelCase_ : Union[str, Any] = start_index + 1
    if overwrite and len(snake_case__) > 0:
        # Warn the user a file has been modified.
        print(F'''Detected changes, rewriting (unknown).''')
        with open(snake_case__ , "w" , encoding="utf-8" , newline="\n") as f:
            f.writelines(snake_case__)
    return diffs
def UpperCamelCase ( snake_case__ = False):
    """Run the `Copied from` consistency check over every .py file in the repo;
    raise with a report unless `overwrite` (the boolean parameter) is set.

    NOTE(review): obfuscation damage — the glob root and the arguments to
    `is_copy_consistent` became `snake_case__`, the loop reads undefined names
    (`all_files`, `new_diffs`, `overwrite`, `diff`), and `(unknown)` in the diff
    message presumably replaced `{filename}`. Confirm against the original.
    """
    lowerCAmelCase_ : Tuple = glob.glob(os.path.join(snake_case__ , "**/*.py") , recursive=snake_case__)
    lowerCAmelCase_ : int = []
    for filename in all_files:
        lowerCAmelCase_ : Union[str, Any] = is_copy_consistent(snake_case__ , snake_case__)
        diffs += [F'''- (unknown): copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
    if not overwrite and len(snake_case__) > 0:
        lowerCAmelCase_ : Optional[Any] = "\n".join(snake_case__)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
    # CLI entry point: `--fix_and_overwrite` rewrites out-of-sync copies in place.
    _lowercase = argparse.ArgumentParser()
    # NOTE(review): the parser and parsed args are stored as `_lowercase` but
    # read as `parser`/`args` below — names collapsed by obfuscation.
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    _lowercase = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 659 | 0 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
# Source summaries fed to the ROUGE tests below (predictions).
# NOTE(review): both constants are bound to the same collapsed name `_A`, so the
# second list clobbers the first; the test functions read other, now-missing
# names (presumably PRED / TGT in the original).
_A = [
    """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
    """ final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
    """ depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.""",
    """The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
    """ accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s"""
    """ founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the"""
    """ body.""",
    """Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
    """ state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
    """ world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
    """ punishment.""",
]
# Reference summaries (targets) for the same three articles.
_A = [
    """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
    """ Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
    """ had informed his Lufthansa training school of an episode of severe depression, airline says .""",
    """Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
    """ Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
    """ Israelis .""",
    """Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
    """ death . Organization claims that governments around the world are using the threat of terrorism to advance"""
    """ executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
    """ sentences up by 28% .""",
]
def A_ ( ) -> Any:
    """Check that per-sentence (non-aggregated) scoring yields the same mean
    rouge2 F-measure whether or not rougeL is also requested.

    NOTE(review): obfuscation replaced the prediction/target arguments, keyword
    values, and the isinstance operands with the undefined name `snake_case__`
    — as written this raises NameError. Intended call is presumably
    calculate_rouge(PRED, TGT, bootstrap_aggregation=False, ...); confirm
    against the original test file.
    """
    __SCREAMING_SNAKE_CASE : Any = calculate_rouge(snake_case__ , snake_case__ , bootstrap_aggregation=snake_case__ , rouge_keys=['''rouge2''', '''rougeL'''] )
    assert isinstance(snake_case__ , snake_case__ )
    __SCREAMING_SNAKE_CASE : str = calculate_rouge(snake_case__ , snake_case__ , bootstrap_aggregation=snake_case__ , rouge_keys=['''rouge2'''] )
    assert (
        pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
    )
def A_ ( ) -> int:
    """Check that rougeLsum with newline separation scores higher than without.

    NOTE(review): arguments were obfuscated to the undefined `snake_case__`
    (NameError as written); the assignments are read back as `score` /
    `score_no_sep`, so presumably `newline_sep=True` vs `False` — confirm.
    """
    __SCREAMING_SNAKE_CASE : str = "rougeLsum"
    __SCREAMING_SNAKE_CASE : Any = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=[k] )[k]
    __SCREAMING_SNAKE_CASE : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=[k] )[k]
    assert score > score_no_sep
def A_ ( ) -> Tuple:
    """Check that newline separation does not change rouge1/rouge2/rougeL.

    NOTE(review): call arguments were obfuscated to the undefined
    `snake_case__` (NameError as written); results are read back as
    `score_sep` / `score_no_sep`, implying `newline_sep=True` vs `False`.
    """
    __SCREAMING_SNAKE_CASE : int = ["rouge1", "rouge2", "rougeL"]
    __SCREAMING_SNAKE_CASE : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=snake_case__ )
    __SCREAMING_SNAKE_CASE : List[Any] = calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ , rouge_keys=snake_case__ )
    assert score_sep == score_no_sep
def A_ ( ) -> str:
    """Check single-sentence inputs score identically with and without
    newline separation.

    NOTE(review): the two local lists (hypotheses and references) are both
    assigned to `__SCREAMING_SNAKE_CASE` and the final call's arguments are the
    undefined `snake_case__` — NameError as written; confirm original wiring.
    """
    __SCREAMING_SNAKE_CASE : List[str] = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
    ]
    __SCREAMING_SNAKE_CASE : Dict = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ ) == calculate_rouge(snake_case__ , snake_case__ , newline_sep=snake_case__ )
def A_ ( ) -> str:
    """Check rougeLsum on `<n>`-separated text: treating `<n>` as sentence
    breaks (newline_sep) should raise the score.

    NOTE(review): the hypothesis/reference lists are both bound to the same
    collapsed local name, and the scoring-call arguments are the undefined
    `snake_case__` — NameError as written; confirm the original wiring.
    """
    __SCREAMING_SNAKE_CASE : Optional[int] = [
        "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
    ]
    __SCREAMING_SNAKE_CASE : Any = [
        " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
    ]
    __SCREAMING_SNAKE_CASE : Any = calculate_rouge(snake_case__ , snake_case__ , rouge_keys=['''rougeLsum'''] , newline_sep=snake_case__ )["rougeLsum"]
    __SCREAMING_SNAKE_CASE : Any = calculate_rouge(snake_case__ , snake_case__ , rouge_keys=['''rougeLsum'''] )["rougeLsum"]
    assert new_score > prev_score
def A_ ( ) -> Tuple:
    """Smoke-test `calculate_rouge_path` on the bundled wmt_en_ro test data,
    with and without bootstrap aggregation.

    NOTE(review): the isinstance operands and the `bootstrap_aggregation` value
    were obfuscated to the undefined `snake_case__` — NameError as written;
    presumably `isinstance(metrics, dict)` and `bootstrap_aggregation=False`.
    """
    __SCREAMING_SNAKE_CASE : int = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
    __SCREAMING_SNAKE_CASE : Dict = calculate_rouge_path(data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) )
    assert isinstance(snake_case__ , snake_case__ )
    __SCREAMING_SNAKE_CASE : Any = calculate_rouge_path(
        data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) , bootstrap_aggregation=snake_case__ )
    assert isinstance(snake_case__ , snake_case__ )
| 158 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class __snake_case ( snake_case__ ):
    """Configuration for a Swin Transformer V2 model.

    Stores the architecture hyper-parameters (image/patch geometry, stage
    depths and head counts, window size, dropouts, etc.) on the instance.

    Fix (obfuscation damage): the original declared every ``__init__``
    parameter with the same name ``lowerCAmelCase__`` (a SyntaxError) and bound
    the arguments to throwaway locals instead of ``self.*``; the canonical
    Swinv2Config parameter names are restored here — each default value matches
    the corrupted original positionally. The two class attributes were also
    collapsed to one name; restored to ``model_type`` / ``attribute_map``.
    NOTE(review): the base class name ``snake_case__`` (presumably
    PretrainedConfig) is still obfuscated elsewhere in this file.
    """

    model_type = 'swinv2'
    # Map standard attribute names onto the ones stored on this config.
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=2_24,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 659 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (NOTE(review): bound to the collapsed name `UpperCAmelCase_`, not `logger`).
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)
# Map from pretrained checkpoint name to its hosted config.json.
UpperCAmelCase_ : Union[str, Any] = {
    'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
    """Configuration class for an LXMERT model.

    Stores the text-encoder hyper-parameters, the sizes of the three LXMERT
    stacks (language, cross-modality, vision), the visual feature dimensions
    and the pre-training task switches.

    Fix (obfuscation damage): the original declared every constructor parameter
    with the same name ``SCREAMING_SNAKE_CASE__`` (a SyntaxError) and bound the
    arguments to throwaway locals instead of ``self.*``; the canonical
    LxmertConfig parameter names are restored — each default matches the
    corrupted original positionally. The two collapsed class attributes are
    restored to ``model_type`` / ``attribute_map``.
    NOTE(review): the base class name ``snake_case__`` (presumably
    PretrainedConfig) is still obfuscated elsewhere in this file.
    """

    model_type = '''lxmert'''
    attribute_map = {}

    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        hidden_size=7_6_8,
        num_attention_heads=1_2,
        num_qa_labels=9_5_0_0,
        num_object_labels=1_6_0_0,
        num_attr_labels=4_0_0,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2_0_4_8,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # LXMERT has three stacks; expose their depths via the standard attribute.
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 570 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
# Module logger (NOTE(review): bound to the collapsed name `_lowercase`, while
# the class below calls `logger.warning` — names diverged during obfuscation).
_lowercase = logging.get_logger(__name__)
class __snake_case ( snake_case__ ):
    """Speech feature extractor producing MFSC (log-mel spectrogram) features
    plus attention masks, with optional per-utterance mean/variance
    normalization.

    NOTE(review): obfuscation damage — every ``__init__`` parameter shares the
    name ``lowerCAmelCase__`` (a SyntaxError), constructor arguments are bound
    to throwaway ``lowerCAmelCase_`` locals instead of ``self.*`` attributes,
    and most call-argument identities are lost. The structure matches a
    SequenceFeatureExtractor subclass (hamming window, 80 mel bins, 16 kHz);
    confirm behavioral details against the original before relying on them.
    """

    UpperCamelCase_ = ['input_features', 'attention_mask']

    def __init__( self : Optional[Any] ,lowerCAmelCase__ : Any=80 ,lowerCAmelCase__ : Optional[Any]=1_60_00 ,lowerCAmelCase__ : List[str]=0.0 ,lowerCAmelCase__ : Tuple=10 ,lowerCAmelCase__ : Optional[Any]=25 ,lowerCAmelCase__ : Any="hamming_window" ,lowerCAmelCase__ : List[str]=32_768.0 ,lowerCAmelCase__ : Union[str, Any]=0.97 ,lowerCAmelCase__ : Any=1.0 ,lowerCAmelCase__ : str=True ,lowerCAmelCase__ : int=True ,lowerCAmelCase__ : Tuple=False ,**lowerCAmelCase__ : Optional[int] ,) -> Optional[Any]:
        """Store extraction hyper-parameters and derive FFT sizes.

        NOTE(review): parameters are presumably (feature_size=80,
        sampling_rate=16000, padding_value=0.0, hop_length=10, win_length=25,
        win_function, frame_signal_scale, preemphasis_coeff, mel_floor,
        normalize_means, normalize_vars, return_attention_mask) based on the
        body's right-hand sides — confirm against the original signature.
        """
        super().__init__(feature_size=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,padding_value=lowerCAmelCase__ ,**lowerCAmelCase__ )
        lowerCAmelCase_ : Optional[int] = feature_size
        lowerCAmelCase_ : List[Any] = sampling_rate
        lowerCAmelCase_ : Union[str, Any] = padding_value
        lowerCAmelCase_ : str = hop_length
        lowerCAmelCase_ : str = win_length
        lowerCAmelCase_ : str = frame_signal_scale
        lowerCAmelCase_ : Any = preemphasis_coeff
        lowerCAmelCase_ : Optional[Any] = mel_floor
        lowerCAmelCase_ : List[str] = normalize_means
        lowerCAmelCase_ : Optional[Any] = normalize_vars
        lowerCAmelCase_ : Dict = win_function
        lowerCAmelCase_ : List[Any] = return_attention_mask
        # Window/stride lengths converted from milliseconds to samples.
        lowerCAmelCase_ : Tuple = win_length * sampling_rate // 10_00
        lowerCAmelCase_ : str = hop_length * sampling_rate // 10_00
        lowerCAmelCase_ : Dict = optimal_fft_length(self.sample_size )
        lowerCAmelCase_ : Optional[int] = (self.n_fft // 2) + 1

    def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : np.array ) -> np.ndarray:
        """Compute MFSC features for a single waveform; returns (frames, feature_size)."""
        # Periodic window only for the hamming case (matches the original branching).
        if self.win_function == "hamming_window":
            lowerCAmelCase_ : int = window_function(window_length=self.sample_size ,name=self.win_function ,periodic=lowerCAmelCase__ )
        else:
            lowerCAmelCase_ : Tuple = window_function(window_length=self.sample_size ,name=self.win_function )
        lowerCAmelCase_ : List[str] = mel_filter_bank(
            num_frequency_bins=self.n_freqs ,num_mel_filters=self.feature_size ,min_frequency=0.0 ,max_frequency=self.sampling_rate / 2.0 ,sampling_rate=self.sampling_rate ,)
        lowerCAmelCase_ : Any = spectrogram(
            one_waveform * self.frame_signal_scale ,window=lowerCAmelCase__ ,frame_length=self.sample_size ,hop_length=self.sample_stride ,fft_length=self.n_fft ,center=lowerCAmelCase__ ,preemphasis=self.preemphasis_coeff ,mel_filters=lowerCAmelCase__ ,mel_floor=self.mel_floor ,log_mel="log" ,)
        # Transpose so time is the leading axis.
        return msfc_features.T

    def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[Any] ,lowerCAmelCase__ : Tuple ) -> Optional[Any]:
        """Mean/variance-normalize one feature array over its valid length and
        pad the tail with `padding_value`; returns float32."""
        if self.normalize_means:
            lowerCAmelCase_ : Optional[int] = x[:input_length].mean(axis=0 )
            lowerCAmelCase_ : List[str] = np.subtract(lowerCAmelCase__ ,lowerCAmelCase__ )
        if self.normalize_vars:
            lowerCAmelCase_ : Optional[Any] = x[:input_length].std(axis=0 )
            lowerCAmelCase_ : Tuple = np.divide(lowerCAmelCase__ ,lowerCAmelCase__ )
        if input_length < x.shape[0]:
            lowerCAmelCase_ : int = padding_value
        # make sure array is in float32
        lowerCAmelCase_ : Any = x.astype(np.floataa )
        return x

    def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : List[np.ndarray] ,lowerCAmelCase__ : Optional[np.ndarray] = None ) -> List[np.ndarray]:
        """Normalize every feature array in the batch, using the attention mask
        (when given) to determine each valid length."""
        lowerCAmelCase_ : List[Any] = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(lowerCAmelCase__ ,lowerCAmelCase__ ,self.padding_value ) for x, n in zip(lowerCAmelCase__ ,lowerCAmelCase__ )]

    def __call__( self : int ,lowerCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCAmelCase__ : Union[bool, str, PaddingStrategy] = False ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : bool = False ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[bool] = None ,lowerCAmelCase__ : Optional[Union[str, TensorType]] = None ,lowerCAmelCase__ : Optional[int] = None ,**lowerCAmelCase__ : Union[str, Any] ,) -> BatchFeature:
        """Featurize and pad one or more raw mono waveforms into a BatchFeature.

        Validates the sampling rate, coerces inputs to float32 batches,
        extracts MFSC features, pads them, optionally normalizes, and converts
        to the requested tensor type.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        # A 2-D numpy array is a batch; >2-D means multi-channel, which is rejected.
        lowerCAmelCase_ : List[Any] = isinstance(lowerCAmelCase__ ,np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        lowerCAmelCase_ : str = is_batched_numpy or (
            isinstance(lowerCAmelCase__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
        )
        if is_batched:
            lowerCAmelCase_ : Tuple = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(lowerCAmelCase__ ,np.ndarray ):
            lowerCAmelCase_ : int = np.asarray(lowerCAmelCase__ ,dtype=np.floataa )
        elif isinstance(lowerCAmelCase__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            lowerCAmelCase_ : Union[str, Any] = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            lowerCAmelCase_ : Optional[int] = [raw_speech]
        # extract fbank features
        lowerCAmelCase_ : Dict = [self._extract_mfsc_features(lowerCAmelCase__ ) for one_waveform in raw_speech]
        # convert into correct format for padding
        lowerCAmelCase_ : int = BatchFeature({"input_features": features} )
        lowerCAmelCase_ : Union[str, Any] = self.pad(
            lowerCAmelCase__ ,padding=lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,truncation=lowerCAmelCase__ ,pad_to_multiple_of=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
        # make sure list is in array format
        lowerCAmelCase_ : Optional[Any] = padded_inputs.get("input_features" )
        if isinstance(input_features[0] ,lowerCAmelCase__ ):
            lowerCAmelCase_ : Optional[int] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for feature in input_features]
        lowerCAmelCase_ : List[Any] = padded_inputs.get("attention_mask" )
        if attention_mask is not None:
            lowerCAmelCase_ : Dict = [np.asarray(lowerCAmelCase__ ,dtype=np.intaa ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            # Only pass an attention mask when real padding was applied.
            lowerCAmelCase_ : Dict = (
                np.array(lowerCAmelCase__ ,dtype=np.intaa )
                if self._get_padding_strategies(lowerCAmelCase__ ,max_length=lowerCAmelCase__ ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            lowerCAmelCase_ : List[str] = self.normalize(
                padded_inputs["input_features"] ,attention_mask=lowerCAmelCase__ )
        if return_tensors is not None:
            lowerCAmelCase_ : Dict = padded_inputs.convert_to_tensors(lowerCAmelCase__ )
        return padded_inputs
| 659 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.