code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of ``numbers``.

    Uses the classic running max/min trick: a negative element swaps the
    roles of the best and worst products ending at the previous index.

    :param numbers: list or tuple of ints (empty input returns 0)
    :raises ValueError: if ``numbers`` is not an iterable of integers
    """
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError('numbers must be an iterable of integers')
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # a negative factor turns the smallest product into the largest
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
| 487
|
# Undirected demo graph as an adjacency list; the __main__ demo below
# references it by the name ``demo_graph``.
demo_graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Breadth-first search for the shortest path from ``start`` to ``goal``.

    :param graph: adjacency-list mapping of node -> list of neighbours
    :param start: node the search begins at
    :param goal: node the search looks for
    :return: the shortest path as a list of nodes, or [] if unreachable
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on the shortest path ``start`` -> ``target``.

    :return: hop count, 0 when start == target, -1 when either node is
        missing from ``graph`` or the target is unreachable.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
    # Demo: run both BFS helpers over the module-level demo graph.
    print(bfs_shortest_path(demo_graph, 'G', 'D'))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, 'G', 'D'))  # returns 4
| 487
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import table for the PoolFormer sub-modules: each optional section is
# only registered when its backend (vision / torch) is available.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )
else:
    # ...while at runtime the module is replaced by a lazy proxy.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 700
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests for StableDiffusionKDiffusionPipeline."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_447, 0.0_492, 0.0_468, 0.0_408, 0.0_383, 0.0_408, 0.0_354, 0.0_380, 0.0_339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1_237, 0.1_320, 0.1_438, 0.1_359, 0.1_390, 0.1_132, 0.1_277, 0.1_175, 0.1_112])
        # NOTE(review): tolerance is unusually loose (5e-1) — inherited from the original.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11_381_689, 0.12_112_921, 0.1_389_457, 0.12_549_606, 0.1_244_964, 0.10_831_517, 0.11_562_866, 0.10_867_816, 0.10_499_048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 488
| 0
|
"""simple docstring"""
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters generated by the recurrence, up to ``max_perimeter``.

    This appears to implement Project Euler 94 (almost equilateral triangles
    with integer area): perimeters follow the Pell-like recurrence below, so
    only O(log max_perimeter) iterations are needed.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        # alternate between the (a, a, a+1) and (a, a, a-1) families
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
    # Print the answer in the ``solution() = <value>`` debug format.
    print(f'''{solution() = }''')
| 88
|
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    """A binary-tree node holding ``data`` and optional left/right children."""

    def __init__(self, data) -> None:
        self.data = data
        self.left = None
        self.right = None
def build_tree():
    """Interactively build a binary tree level by level from stdin.

    Returns the root ``TreeNode``; entering "n" (or nothing) stops input.
    """
    print("""\n********Press N to stop entering at any point of time********\n""")
    check = input("""Enter the value of the root node: """).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f'Enter the left node of {node_found.data}: '
        check = input(msg).strip().lower() or """n"""
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f'Enter the right node of {node_found.data}: '
        check = input(msg).strip().lower() or """n"""
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    # unreachable: the loop always returns once input stops
    raise
def pre_order(node: TreeNode) -> None:
    """Print a pre-order (root, left, right) traversal, comma-separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=""",""")
    pre_order(node.left)
    pre_order(node.right)
def in_order(node: TreeNode) -> None:
    """Print an in-order (left, root, right) traversal, comma-separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=""",""")
    in_order(node.right)
def post_order(node: TreeNode) -> None:
    """Print a post-order (left, right, root) traversal, comma-separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=""",""")
def level_order(node: TreeNode) -> None:
    """Print a breadth-first (level-order) traversal, comma-separated."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=""",""")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)
def level_order_actual(node: TreeNode) -> None:
    """Print a level-order traversal with one printed line per tree level."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        # drain the current level, collecting the next level's nodes
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=""",""")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=""",""")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        # descend to the leftmost unvisited node
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=""",""")
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks."""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1: list[TreeNode] = []
    stack2: list[TreeNode] = []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=""",""")
def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    """Return ``s`` centred in a ``width``-wide banner of ``char``.

    With an empty ``s`` the banner is a full line of ``char`` preceded by
    a newline.
    """
    if not s:
        return "\n" + width * char
    # -2 accounts for the two spaces around the title
    left, extra = divmod(width - len(s) - 2, 2)
    return f'{left * char} {s} {(left + extra) * char}'
if __name__ == "__main__":
    # Demo: build a tree from stdin and exercise every traversal.
    import doctest

    doctest.testmod()
    print(prompt("""Binary Tree Traversals"""))

    node = build_tree()
    print(prompt("""Pre Order Traversal"""))
    pre_order(node)
    print(prompt() + """\n""")

    print(prompt("""In Order Traversal"""))
    in_order(node)
    print(prompt() + """\n""")

    print(prompt("""Post Order Traversal"""))
    post_order(node)
    print(prompt() + """\n""")

    print(prompt("""Level Order Traversal"""))
    level_order(node)
    print(prompt() + """\n""")

    print(prompt("""Actual Level Order Traversal"""))
    level_order_actual(node)
    print("""*""" * 50 + """\n""")

    print(prompt("""Pre Order Traversal - Iteration Version"""))
    pre_order_iter(node)
    print(prompt() + """\n""")

    print(prompt("""In Order Traversal - Iteration Version"""))
    in_order_iter(node)
    print(prompt() + """\n""")

    print(prompt("""Post Order Traversal - Iteration Version"""))
    post_order_iter(node)
    print(prompt())
| 88
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of reference checkpoints to their hosted config files.
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
    '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
    '''junnyu/roformer_chinese_char_small''': (
        '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
    ),
    '''junnyu/roformer_chinese_char_base''': (
        '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
    ),
    '''junnyu/roformer_small_discriminator''': (
        '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
    ),
    '''junnyu/roformer_small_generator''': (
        '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a RoFormer model.

    Defaults follow the obfuscated original's positional values; parameter
    names restored per the standard transformers RoFormer configuration.
    """

    model_type = "roformer"

    def __init__(
        self,
        vocab_size=5_0000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        # embedding size defaults to the hidden size unless given explicitly
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    """ONNX export configuration describing RoFormer's dynamic input axes."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # multiple-choice inputs carry an extra "choice" axis
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 707
|
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Return True when ``input_str`` contains every letter a-z (set-based)."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26
def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Return True when ``input_str`` is a pangram (flag-array variant)."""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)
def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """Return True when ``input_str`` is a pangram (set-comprehension variant)."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26
def benchmark() -> None:
    """Time the three pangram implementations against each other."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
    # Run doctests first, then the timing comparison.
    import doctest

    doctest.testmod()
    benchmark()
| 73
| 0
|
def solution(n: int = 6008_5147_5143) -> int:
    """Return the largest prime factor of ``n`` by trial division.

    :raises TypeError: when ``n`` cannot be cast to int
    :raises ValueError: when ``n`` < 1
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("""Parameter n must be int or castable to int.""")
    if n <= 0:
        raise ValueError("""Parameter n must be greater than or equal to one.""")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next divisor of n
        while n % i != 0:
            i += 1
        ans = i
        # strip out every power of this factor
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
    # Print the largest prime factor of the default input.
    print(F"""{solution() = }""")
| 392
|
def pancake_sort(arr: list) -> list:
    """Sort ``arr`` in place using pancake sort and return it.

    Repeatedly flips the prefix containing the maximum element to the
    front, then flips the whole unsorted prefix to push it into place.
    """
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin and sort it.
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(pancake_sort(unsorted))
| 392
| 1
|
'''simple docstring'''
def UpperCAmelCase(column_title):
    """Convert an Excel-style uppercase column title (e.g. "AB") to its number.

    "A" -> 1, "Z" -> 26, "AA" -> 27 — base-26 with digits A..Z.
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        # ord(letter) - 64 maps "A".."Z" to 1..26
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
    # Run the module's doctests.
    from doctest import testmod

    testmod()
| 701
|
class Graph:
    """Undirected weighted graph (adjacency map) with a Borůvka MST solver."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        # adjacency[head][tail] = weight (stored symmetrically)
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Register ``vertex`` if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Add an undirected edge ``head`` <-> ``tail`` (self-loops ignored)."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Reassign weights so every edge weight is distinct (keeps order)."""
        edges = self.get_edges()
        # drop the mirrored duplicate of every undirected edge
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """Render every directed edge as ``head -> tail == weight``."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"""{head} -> {tail} == {weight}\n"""
        return string.rstrip("\n")

    def get_edges(self):
        """Return every directed edge as a (tail, head, weight) tuple."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Return a view of all vertices."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Build a Graph from optional vertex and (head, tail, weight) lists."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            """Create a singleton set for ``item`` (idempotent)."""
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            """Return the representative of ``item``'s set (compressing paths)."""
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            """Merge the sets of the two items; return the new root."""
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka(graph):
        """Return a minimum spanning tree of ``graph`` via Borůvka's algorithm.

        NOTE(review): assumes edge weights are distinct (use
        ``distinct_weight`` first otherwise) — standard Borůvka requirement.
        """
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            # cheapest outgoing edge per component root (-1 = none yet)
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            # drop the mirrored duplicate of every undirected edge
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
| 205
| 0
|
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    """Return gcd(a, b) using the recursive Euclidean algorithm."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)
def gcd_by_iterative(x: int, y: int) -> int:
    """Return gcd(x, y) using the iterative Euclidean algorithm."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
def main():
    """Read two comma-separated integers and print both GCD computations."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f'''greatest_common_divisor({num_1}, {num_2}) = '''
            f'''{greatest_common_divisor(num_1, num_2)}'''
        )
        print(f'''By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}''')
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
    # Interactive entry point.
    main()
| 69
|
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__SCREAMING_SNAKE_CASE = get_logger(__name__)
class MockDownloadManager:
    """Download-manager stand-in that resolves URLs to local dummy-data files.

    Mirrors the real download manager's interface but maps every requested
    URL onto paths inside a ``dummy_data.zip`` fixture (local or fetched
    from the dataset's GitHub location).
    """

    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: "Union[Version, str]",
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        # lazily download/extract the dummy data once
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('''dummy''', self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join('''dummy''', self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, '''dummy_data.zip''')

    def download_dummy_data(self):
        """Fetch (or locate) the dummy zip and return the extracted root path."""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, '''/'''))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if it's a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, '''/''').split('''/''')[:-1])

    def download_and_extract(self, data_url, *args):
        """Map ``data_url`` (str, list/tuple, or dict) onto dummy-data paths."""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        # dummy data is already extracted
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        """Resolve a dict of url(s) to dummy paths keyed like the input dict."""
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [
                    os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls
                ]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        """Resolve a list of urls to a list of dummy paths."""
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''', url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''') for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split('''/''')[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        """Resolve a single url to its dummy path (or the dummy root)."""
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split('''/''')[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        # nothing to clean up for dummy data
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        """Yield (relative_posix_path, file_obj) pairs for an archive's members."""

        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob('''*''')
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''')):
                yield file_path.relative_to(path).as_posix(), file_path.open('''rb''')

    def iter_files(self, paths):
        """Yield every non-hidden file under ``paths`` (str or list of str)."""
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith(('''.''', '''__''')):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith(('''.''', '''__''')):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith(('''.''', '''__''')):
                            continue
                        yield os.path.join(dirpath, filename)
| 0
|
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase(TokenizerTesterMixin, unittest.TestCase):
    """Test suite for the (slow) XLM BPE tokenizer.

    Fixes over the previous revision (identifier mangling):
    - the mixin base was the undefined name ``UpperCamelCase__``; it is the
      ``TokenizerTesterMixin`` imported at the top of the file;
    - both class attributes were named ``snake_case__`` (the second silently
      overwrote the first); restored to the hook names the mixin reads;
    - all methods were named ``a`` (only the last survived on the class and
      ``setUp`` was never invoked by unittest); restored to conventional names.
    """

    # Hooks consumed by TokenizerTesterMixin.
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False  # XLM has no fast (Rust) tokenizer

    def setUp(self) -> None:
        """Write a tiny vocab/merges pair into the mixin's temporary directory."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        """Return (input_text, expected_decoded_text) for round-trip checks."""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """BPE-tokenize 'lower' and check the tokens and their vocabulary ids."""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        """Special-token templates: <s> x </s> and <s> x </s> y </s>."""
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 125
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def _A():
    """Return ``(arr, r)``: 10 random ints in [-1000, 1000] and a target in [-5000, 5000].

    Fix: the previous revision bound both values to one mangled name and then
    returned the never-bound names ``arr`` and ``r`` (NameError).
    """
    arr = [randint(-1000, 1000) for _ in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)
# NOTE(review): identifier mangling renamed the dataset factory above to `_A`,
# so the original `make_dataset()` call raised NameError; call the actual binding.
UpperCamelCase = _A()
# Alias matching the name imported by the timeit setup string in the benchmark below.
dataset = UpperCamelCase
def _A(arr: list[int], target: int):
    """Brute force: scan all ordered triples of ``arr`` for one summing to ``target``.

    Returns the sorted triple, or ``(0, 0, 0)`` when none exists.
    Fix: the previous signature repeated one mangled parameter name twice
    (a SyntaxError); the body already read ``target``, and names are restored
    to match it.
    """
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def _A(arr: list[int], target: int):
    """Two-pointer search on a sorted copy; O(n^2) time, O(n) extra space.

    Returns a triple from ``arr`` summing to ``target``, else ``(0, 0, 0)``.
    Fixes: the previous signature duplicated one mangled parameter name
    (SyntaxError), and the function sorted the caller's list in place -- it
    now works on a sorted copy so the argument is never mutated.
    """
    arr = sorted(arr)  # copy: do not mutate the caller's list
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            total = arr[i] + arr[left] + arr[right]
            if total == target:
                return (arr[i], arr[left], arr[right])
            if total < target:
                left += 1
            else:
                right -= 1
    return (0, 0, 0)
def _A():
    """Benchmark both triplet solvers via ``timeit.repeat`` and return their best times.

    NOTE(review): the setup string imports ``dataset``, ``triplet_sum1`` and
    ``triplet_sum2`` from ``__main__``, but after identifier mangling the
    module defines ``UpperCamelCase`` and three functions all named ``_A``,
    so the benchmark raises at run time. Also, all four locals below rebind
    one name (``lowerCAmelCase__``) while the ``repeat`` calls read the never
    bound ``lowerCAmelCase_`` -- restore distinct names before use.
    """
    lowerCAmelCase__ = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    lowerCAmelCase__ = "\ntriplet_sum1(*dataset)\n"
    lowerCAmelCase__ = "\ntriplet_sum2(*dataset)\n"
    lowerCAmelCase__ = repeat(setup=lowerCAmelCase_, stmt=lowerCAmelCase_, repeat=5, number=1_0000)
    lowerCAmelCase__ = repeat(setup=lowerCAmelCase_, stmt=lowerCAmelCase_, repeat=5, number=1_0000)
    return (min(lowerCAmelCase_), min(lowerCAmelCase_))
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # NOTE(review): `solution_times` does not exist after identifier mangling
    # (the benchmark function above is bound as `_A`), and the prints read
    # `times` while the result is bound to `UpperCamelCase` -- direct
    # execution therefore raises NameError.
    UpperCamelCase = solution_times()
    print(F"""The time for naive implementation is {times[0]}.""")
    print(F"""The time for optimized implementation is {times[1]}.""")
| 125
| 1
|
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
# Restore the conventional private names: previously all three strings rebound
# one mangled identifier, leaving `_CITATION`, `_DESCRIPTION` and
# `_KWARGS_DESCRIPTION` (read by the metric class below) undefined.
_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_DESCRIPTION = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_KWARGS_DESCRIPTION = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
# Backward-compatible alias preserving the final value of the mangled name.
SCREAMING_SNAKE_CASE__ = _KWARGS_DESCRIPTION
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case(datasets.Metric):
    """MSE metric wrapping ``sklearn.metrics.mean_squared_error``.

    NOTE(review): the three methods were all mangled to ``_a`` (only the last
    survived on the class, and ``self._get_feature_types()`` could not
    resolve); they are restored to the hook names the ``datasets.Metric`` API
    dispatches on, and the ``_compute`` parameters (previously one duplicated
    mangled name -- a SyntaxError) to the names documented above.
    """

    def _info(self):
        """Describe the metric (features, citation, reference docs) for `datasets`."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        """Float sequences for the 'multilist' config, scalar floats otherwise."""
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        return {
            "predictions": datasets.Value("float"),
            "references": datasets.Value("float"),
        }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        """Delegate to sklearn; returns {'mse': value} (RMSE when squared=False)."""
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 267
|
'''simple docstring'''
# Radix of the polynomial rolling hash (one byte per character).
alphabet_size = 256
# Modulus to hash a string
modulus = 100_0003
# Backward-compatible alias for the previously mangled constant name.
SCREAMING_SNAKE_CASE__ = modulus


def lowerCamelCase(pattern: str, text: str) -> bool:
    """Rabin-Karp substring search: True iff ``pattern`` occurs in ``text``.

    Uses a rolling polynomial hash modulo ``modulus``; candidate windows whose
    hash matches are confirmed by direct comparison, so the answer is exact.
    Average O(len(text) + len(pattern)) time.

    Fix: ``alphabet_size`` and ``modulus`` were read by this function but the
    module constants were both bound to one mangled name (NameError); they are
    restored above.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Calculating the hash of pattern and first substring of text.
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        # After the loop: modulus_power == alphabet_size ** (p_len - 1) % modulus.
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def lowerCamelCase():
    """Self-test for the Rabin-Karp matcher.

    NOTE(review): this calls ``rabin_karp``, which does not exist after the
    identifier mangling -- the matcher above is also named ``lowerCamelCase``,
    and this very definition shadows it at module scope -- so running these
    checks raises NameError; restore distinct function names before use.
    """
    # Test 1)
    lowercase__ = "abc1abc12"
    lowercase__ = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    lowercase__ = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(_snake_case, _snake_case) and not rabin_karp(_snake_case, _snake_case)
    # Test 2)
    lowercase__ = "ABABX"
    lowercase__ = "ABABZABABYABABX"
    assert rabin_karp(_snake_case, _snake_case)
    # Test 3)
    lowercase__ = "AAAB"
    lowercase__ = "ABAAAAAB"
    assert rabin_karp(_snake_case, _snake_case)
    # Test 4)
    lowercase__ = "abcdabcy"
    lowercase__ = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(_snake_case, _snake_case)
    # Test 5)
    lowercase__ = "Lü"
    lowercase__ = "Lüsai"
    assert rabin_karp(_snake_case, _snake_case)
    lowercase__ = "Lue"
    assert not rabin_karp(_snake_case, _snake_case)
    print("Success.")
if __name__ == "__main__":
    # NOTE(review): `test_rabin_karp` is undefined (the self-test above was
    # mangled to `lowerCamelCase`), so direct execution raises NameError.
    test_rabin_karp()
| 267
| 1
|
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an original XLM checkpoint into the HF Transformers layout.

    Writes three files into ``pytorch_dump_folder_path``: the model weights,
    the JSON config, and the JSON vocabulary.

    Fixes over the previous revision (identifier mangling): the signature
    repeated one parameter name twice (a SyntaxError); the state-dict keys and
    every intermediate were collapsed onto one name, so the config and vocab
    were both written through the shadowed parameter (the *folder* path) and
    the vocab message printed the config path.
    """
    # Load the original checkpoint on CPU.
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # Keep only JSON-serializable hyper-parameters (drop tensors / arrays).
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # Re-append BPE end-of-word markers; ids 0-13 are special tokens.
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 1_3 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    print(f'Save vocab file to {pytorch_vocab_dump_path}')
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


# Backward-compatible alias for the previously mangled public name.
SCREAMING_SNAKE_CASE__ = convert_xlm_checkpoint_to_pytorch
if __name__ == "__main__":
    # Fix: the parser was previously bound to `a_` while `parser.add_argument`
    # and `args.*` read never-bound names (NameError on direct execution).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 48
|
"""simple docstring"""
import argparse
import copy
def SCREAMING_SNAKE_CASE__(SCREAMING_SNAKE_CASE__: Any):
    """Parse a whitespace-separated edge-list file into an adjacency dict.

    Each line is ``u v w``; the result maps every node to a list of
    ``[neighbour, weight_as_string]`` pairs, in file order, both directions.

    Fix: the previous revision assigned everything to mangled names while the
    reads used ``dict_of_neighbours`` / ``_list``, which were never bound
    (NameError on the first line processed).
    """
    dict_of_neighbours: dict = {}
    with open(SCREAMING_SNAKE_CASE__) as f:
        for line in f:
            # Tolerate trailing fields: only the first three are meaningful.
            parts = line.split()
            u, v, w = parts[0], parts[1], parts[2]
            dict_of_neighbours.setdefault(u, []).append([v, w])
            dict_of_neighbours.setdefault(v, []).append([u, w])
    return dict_of_neighbours
def SCREAMING_SNAKE_CASE__(SCREAMING_SNAKE_CASE__: List[Any], SCREAMING_SNAKE_CASE__: Optional[int]):
    """Greedy nearest-neighbour construction of an initial tour.

    NOTE(review): the signature repeats one mangled parameter name (a
    SyntaxError), and the ``snake_case_`` assignments below shadow each other
    while the reads use the original names (``start_node``, ``visiting``,
    ``first_solution``, ``distance_of_first_solution``, ``best_node``,
    ``position``), none of which is ever bound -- restore the original
    bindings before use. The first character of the input file is taken as
    the start node.
    """
    with open(SCREAMING_SNAKE_CASE__) as f:
        snake_case_ : Optional[Any] = f.read(1)
    snake_case_ : Union[str, Any] = start_node
    snake_case_ : Dict = []
    snake_case_ : Union[str, Any] = start_node
    snake_case_ : Tuple = 0
    while visiting not in first_solution:
        # 10000 acts as "infinity" for the minimum-edge search.
        snake_case_ : int = 1_0_0_0_0
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(SCREAMING_SNAKE_CASE__) and k[0] not in first_solution:
                snake_case_ : Union[str, Any] = k[1]
                snake_case_ : Any = k[0]
        first_solution.append(SCREAMING_SNAKE_CASE__)
        snake_case_ : Tuple = distance_of_first_solution + int(SCREAMING_SNAKE_CASE__)
        snake_case_ : List[str] = best_node
    first_solution.append(SCREAMING_SNAKE_CASE__)
    snake_case_ : Optional[Any] = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    # Close the tour back to the start and remove the sentinel 10000.
    snake_case_ : int = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 1_0_0_0_0
    )
    return first_solution, distance_of_first_solution
def SCREAMING_SNAKE_CASE__(SCREAMING_SNAKE_CASE__: Union[str, Any], SCREAMING_SNAKE_CASE__: Optional[int]):
    """Build the 2-swap neighbourhood of a tour, each candidate annotated with
    its total distance in the last list slot, sorted by that distance.

    NOTE(review): the signature repeats one mangled parameter name (a
    SyntaxError), and the ``snake_case_`` assignments shadow each other while
    the reads use the original names (``_tmp``, ``next_node``, ``distance``,
    ``neighborhood_of_solution``, ``index_of_last_item_in_the_list``) and the
    sort lambda reads ``x`` -- none of these is ever bound. Restore the
    original bindings before use.
    """
    snake_case_ : Union[str, Any] = []
    for n in solution[1:-1]:
        snake_case_ : str = solution.index(SCREAMING_SNAKE_CASE__)
        for kn in solution[1:-1]:
            snake_case_ : Tuple = solution.index(SCREAMING_SNAKE_CASE__)
            if n == kn:
                continue
            # Swap nodes n and kn in a deep copy of the tour.
            snake_case_ : Optional[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__)
            snake_case_ : int = kn
            snake_case_ : Dict = n
            snake_case_ : Optional[int] = 0
            # Re-price the candidate tour edge by edge.
            for k in _tmp[:-1]:
                snake_case_ : Dict = _tmp[_tmp.index(SCREAMING_SNAKE_CASE__) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        snake_case_ : Dict = distance + int(i[1])
            _tmp.append(SCREAMING_SNAKE_CASE__)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    snake_case_ : Optional[Any] = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda SCREAMING_SNAKE_CASE__: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def SCREAMING_SNAKE_CASE__(SCREAMING_SNAKE_CASE__: List[str], SCREAMING_SNAKE_CASE__: Dict, SCREAMING_SNAKE_CASE__: List[Any], SCREAMING_SNAKE_CASE__: str, SCREAMING_SNAKE_CASE__: List[Any]):
    """Tabu search main loop: iteratively move to the best non-tabu 2-swap
    neighbour, maintaining a bounded tabu list of recent swaps.

    NOTE(review): the signature repeats one mangled parameter name five times
    (a SyntaxError), and the ``snake_case_`` assignments shadow each other
    while the reads use the original names (``count``, ``solution``,
    ``best_cost``, ``best_solution``, ``first_exchange_node``,
    ``second_exchange_node``, ``found``, ``i``, ``cost``, ``tabu_list``,
    ``best_solution_ever``), plus the undefined sibling ``find_neighborhood``
    -- restore the original bindings before use.
    """
    snake_case_ : Dict = 1
    snake_case_ : List[Any] = first_solution
    snake_case_ : List[Any] = []
    snake_case_ : Optional[Any] = distance_of_first_solution
    snake_case_ : Dict = solution
    while count <= iters:
        snake_case_ : List[str] = find_neighborhood(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__)
        snake_case_ : List[Any] = 0
        snake_case_ : List[Any] = neighborhood[index_of_best_solution]
        snake_case_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__) - 1
        snake_case_ : List[str] = False
        while not found:
            snake_case_ : Tuple = 0
            # Locate the first position where the candidate differs from the
            # current tour: that pair of nodes is the exchanged move.
            while i < len(SCREAMING_SNAKE_CASE__):
                if best_solution[i] != solution[i]:
                    snake_case_ : Optional[Any] = best_solution[i]
                    snake_case_ : int = solution[i]
                    break
                snake_case_ : List[str] = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                snake_case_ : Tuple = True
                snake_case_ : Dict = best_solution[:-1]
                snake_case_ : Tuple = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    snake_case_ : Tuple = cost
                    snake_case_ : Union[str, Any] = solution
            else:
                # Tabu move: fall through to the next-best neighbour.
                snake_case_ : str = index_of_best_solution + 1
                snake_case_ : Tuple = neighborhood[index_of_best_solution]
            if len(SCREAMING_SNAKE_CASE__) >= size:
                tabu_list.pop(0)
            snake_case_ : List[str] = count + 1
    return best_solution_ever, best_cost
def SCREAMING_SNAKE_CASE__(SCREAMING_SNAKE_CASE__: Optional[Any] = None):
    """CLI driver: build the graph, seed a greedy tour, then run tabu search.

    NOTE(review): this reads the module-level ``args`` namespace (not its
    parameter), and calls ``generate_neighbours`` / ``generate_first_solution``
    / ``tabu_search`` -- all of which were mangled to ``SCREAMING_SNAKE_CASE__``
    and therefore do not resolve; restore distinct function names before use.
    """
    snake_case_ : Tuple = generate_neighbours(args.File)
    snake_case_ , snake_case_ : Optional[Any] = generate_first_solution(
        args.File, SCREAMING_SNAKE_CASE__)
    snake_case_ , snake_case_ : Dict = tabu_search(
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, args.Iterations, args.Size, )
    print(f'Best solution: {best_sol}, with total distance: {best_cost}.')
if __name__ == "__main__":
    # Fix: the parser was bound to `a_` while `parser.add_argument` /
    # `parser.parse_args` read a never-bound name, and `main` does not exist
    # (the driver above is the last definition mangled to
    # SCREAMING_SNAKE_CASE__).
    parser = argparse.ArgumentParser(description='''Tabu Search''')
    parser.add_argument(
        '''-f''',
        '''--File''',
        type=str,
        help='''Path to the file containing the data''',
        required=True,
    )
    parser.add_argument(
        '''-i''',
        '''--Iterations''',
        type=int,
        help='''How many iterations the algorithm should perform''',
        required=True,
    )
    parser.add_argument(
        '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
    )
    # Expose the parsed namespace at module scope: the driver reads the
    # global `args` rather than its parameter.
    args = parser.parse_args()
    # Pass the arguments to main method
    SCREAMING_SNAKE_CASE__(args)
| 48
| 1
|
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
# Bind the module logger to the name the helpers below actually read
# (previously bound to the mangled `a__`, which is rebound further down,
# leaving `logger` undefined).
logger = logging.get_logger(__name__)
a__ : List[str] = logger  # backward-compatible alias for the mangled name
def __lowerCamelCase(src_layers, dest_layers, layers_to_copy) -> None:
    """Copy the `src_layers` selected by index in `layers_to_copy` into
    `dest_layers` in place (state-dict load).

    Fix: the previous signature repeated one mangled parameter name three
    times (a SyntaxError) and the assertion read a never-bound name; names
    restored from the upstream distillation utility.
    """
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f'''{len(dest_layers)} != {len(layers_to_copy)}'''
    dest_layers.load_state_dict(layers_to_copy.state_dict())
# Restore the names the helper functions below actually read: previously both
# tables were bound to the single mangled name `a__` (the second overwrote the
# first), leaving `LAYERS_TO_COPY` and `LAYERS_TO_SUPERVISE` undefined.
LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
# Backward-compatible alias preserving the final binding of the mangled name.
a__ = LAYERS_TO_SUPERVISE
def __lowerCamelCase(n_student, n_teacher) -> Optional[Any]:
    """Return the hardcoded teacher-layer indices to copy for this
    (teacher size, student size) pair.

    Falls back to the first `n_student` layers (with a warning) when no
    mapping exists. Fix: the previous signature repeated one mangled
    parameter name (a SyntaxError); the names are restored from the warning
    message in the body, which already reads `n_teacher` / `n_student`.
    """
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
                f''' {n_student}''')
        return list(range(n_student))
def __lowerCamelCase(n_student, n_teacher) -> List[int]:
    """Pick which teacher layers supervise each student layer for
    intermediate-layer losses.

    Fix: the previous signature repeated one mangled parameter name (a
    SyntaxError); the body already reads `n_student` / `n_teacher`, so the
    names are restored to match it.
    """
    if n_student > n_teacher:
        raise ValueError(f'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''')
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ = "student" , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_=False , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ , ) ->Tuple[PreTrainedModel, List[int], List[int]]:
    """Create a student seq2seq model by copying alternating teacher layers.

    NOTE(review): the parameter list repeats one mangled name eight times,
    which is a SyntaxError; the body reads the real names (`teacher`,
    `save_path`, `e`, `d`, `copy_first_teacher_layers`, `e_layers_to_copy`,
    `d_layers_to_copy`, plus extra config kwargs) and the mangled
    `__UpperCAmelCase`, neither of which is bound here. It also calls
    `pick_layers_to_copy` / `copy_layers`, whose definitions were likewise
    renamed to `__lowerCamelCase` and therefore do not resolve. Restore the
    original identifiers before use.
    """
    snake_case__ = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
    assert (e is not None) or (d is not None), _msg
    if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
        AutoTokenizer.from_pretrained(__UpperCAmelCase ).save_pretrained(__UpperCAmelCase )  # purely for convenience
        snake_case__ = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase ).eval()
    else:
        assert isinstance(__UpperCAmelCase , __UpperCAmelCase ), f'''teacher must be a model or string got type {type(__UpperCAmelCase )}'''
    snake_case__ = teacher.config.to_diff_dict()
    try:
        snake_case__ , snake_case__ = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            snake_case__ = teacher_e
        if d is None:
            snake_case__ = teacher_d
        init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
    except AttributeError:  # T5
        if hasattr(teacher.config , 'num_encoder_layers' ):
            snake_case__ , snake_case__ = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            snake_case__ , snake_case__ = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            snake_case__ = teacher_e
        if d is None:
            snake_case__ = teacher_d
        if hasattr(teacher.config , 'num_encoder_layers' ):
            init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
        else:
            init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(__UpperCAmelCase )
    # Copy weights
    snake_case__ = teacher.config_class(**__UpperCAmelCase )
    snake_case__ = AutoModelForSeqaSeqLM.from_config(__UpperCAmelCase )
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    snake_case__ = student.load_state_dict(teacher.state_dict() , strict=__UpperCAmelCase )
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.
    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        snake_case__ , snake_case__ = list(range(__UpperCAmelCase ) ), list(range(__UpperCAmelCase ) )
        logger.info(
            f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
            f''' {save_path}''' )
        student.save_pretrained(__UpperCAmelCase )
        return student, e_layers_to_copy, d_layers_to_copy
    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        snake_case__ = pick_layers_to_copy(__UpperCAmelCase , __UpperCAmelCase )
    if d_layers_to_copy is None:
        snake_case__ = pick_layers_to_copy(__UpperCAmelCase , __UpperCAmelCase )
    try:
        if hasattr(
            __UpperCAmelCase , 'prophetnet' ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __UpperCAmelCase )
            copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __UpperCAmelCase )
        else:
            copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __UpperCAmelCase )
            copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __UpperCAmelCase )
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block , student.encoder.block , __UpperCAmelCase )
        copy_layers(teacher.decoder.block , student.decoder.block , __UpperCAmelCase )
    logger.info(
        f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
    snake_case__ = {
        'teacher_type': teacher.config.model_type,
        'copied_encoder_layers': e_layers_to_copy,
        'copied_decoder_layers': d_layers_to_copy,
    }
    student.save_pretrained(__UpperCAmelCase )
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
    # NOTE(review): `create_student_by_copying_alternating_layers` does not
    # exist -- every helper above was mangled to `__lowerCamelCase` -- so CLI
    # use raises NameError; restore the original function names.
    fire.Fire(create_student_by_copying_alternating_layers)
| 368
|
'''simple docstring'''
import os
from math import logaa
def __magic_name__(__UpperCAmelCase = "base_exp.txt") -> int:
    """Project Euler 99: return the 1-based line number of the `base,exponent`
    pair with the largest value, compared via exponent * log10(base).

    Fix: the previous loop mapped the *filename argument* over the split
    fields and compared never-bound names (identifier mangling); restored
    from the canonical solution. The data file is resolved relative to this
    module's directory. NOTE(review): `logaa` is the file's mangled import
    name for the base-10 logarithm -- confirm the import line.
    """
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), __UpperCAmelCase))):
        a, x = list(map(int, line.split(",")))
        if x * logaa(a) > largest:
            largest = x * logaa(a)
            result = i + 1
    return result
if __name__ == "__main__":
    # Fix: `solution` is undefined -- the solver above is bound to the
    # mangled name `__magic_name__` -- so direct execution raised NameError.
    print(__magic_name__())
| 109
| 0
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
# Fix: both module constants were bound to one mangled name (the chunk length
# overwrote the logger), while the pipeline class below reads `logger` and
# `TARGET_FEATURE_LENGTH` -- both previously undefined.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
# Fixed number of mel frames processed per chunk by the pipeline below.
TARGET_FEATURE_LENGTH = 2_5_6
# Backward-compatible alias preserving the final value of the mangled name.
lowerCAmelCase__ = TARGET_FEATURE_LENGTH
class a__ ( snake_case ):
    """Spectrogram-diffusion audio pipeline (notes/continuous encoders + film
    decoder + DDPM scheduler + MelGAN vocoder).

    NOTE(review): identifier mangling left every method's parameters as one
    repeated name (`lowercase`) -- duplicate parameter names are a
    SyntaxError -- and the `A__` assignments shadow each other while the
    reads use the original local names (`features`, `outputs`, `min_out`,
    `max_out`, `zero_one`, `timesteps`, `logits`, `ones`, ...), which are
    never bound. The comments below document the visible intent; restore the
    original identifiers before use.
    """

    # presumably `_optional_components` before mangling -- TODO confirm
    __lowerCamelCase = ['melgan']

    def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> None:
        """Register the five sub-modules and the MelGAN feature range."""
        super().__init__()
        # From MELGAN
        A__ = math.log(1e-5 )  # Matches MelGAN training.
        A__ = 4.0  # Largest value for most examples
        A__ = 128
        self.register_modules(
            notes_encoder=lowercase , continuous_encoder=lowercase , decoder=lowercase , scheduler=lowercase , melgan=lowercase , )

    def UpperCamelCase ( self , lowercase , lowercase=(-1.0, 1.0) , lowercase=False ) -> str:
        """Linearly rescale features from [min_value, max_value] to output_range."""
        A__ , A__ = output_range
        if clip:
            A__ = torch.clip(lowercase , self.min_value , self.max_value )
        # Scale to [0, 1].
        A__ = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def UpperCamelCase ( self , lowercase , lowercase=(-1.0, 1.0) , lowercase=False ) -> Optional[int]:
        """Inverse of the scaling above: map outputs back to [min_value, max_value]."""
        A__ , A__ = input_range
        A__ = torch.clip(lowercase , lowercase , lowercase ) if clip else outputs
        # Scale to [0, 1].
        A__ = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Any:
        """Encode note tokens and continuous inputs; returns both (encoding, mask) pairs."""
        A__ = input_tokens > 0
        A__ , A__ = self.notes_encoder(
            encoder_input_tokens=lowercase , encoder_inputs_mask=lowercase )
        A__ , A__ = self.continuous_encoder(
            encoder_inputs=lowercase , encoder_inputs_mask=lowercase )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def UpperCamelCase ( self , lowercase , lowercase , lowercase ) -> Optional[Any]:
        """Run one decoder denoising step at the given noise time."""
        A__ = noise_time
        if not torch.is_tensor(lowercase ):
            A__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(lowercase ) and len(timesteps.shape ) == 0:
            A__ = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        A__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
        A__ = self.decoder(
            encodings_and_masks=lowercase , decoder_input_tokens=lowercase , decoder_noise_time=lowercase )
        return logits

    @torch.no_grad()
    def __call__( self , lowercase , lowercase = None , lowercase = 100 , lowercase = True , lowercase = "numpy" , lowercase = None , lowercase = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate audio chunk by chunk, feeding each chunk's prediction back
        as context for the next, then optionally vocode with MelGAN."""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(lowercase , lowercase ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                F' {type(lowercase )}.' )
        A__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
        A__ = np.zeros([1, 0, self.n_dims] , np.floataa )
        A__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase , device=self.device )
        for i, encoder_input_tokens in enumerate(lowercase ):
            if i == 0:
                A__ = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                A__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                A__ = ones
            A__ = self.scale_features(
                lowercase , output_range=[-1.0, 1.0] , clip=lowercase )
            A__ = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase , continuous_mask=lowercase , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            A__ = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=lowercase , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(lowercase )
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                A__ = self.decode(
                    encodings_and_masks=lowercase , input_tokens=lowercase , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                A__ = self.scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
            A__ = self.scale_to_features(lowercase , input_range=[-1.0, 1.0] )
            A__ = mel[:1]
            A__ = mel.cpu().float().numpy()
            A__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(lowercase , lowercase )
            logger.info("Generated segment" , lowercase )
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
        if output_type == "numpy":
            A__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
        else:
            A__ = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=lowercase )
| 626
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    """Config holder for the PoolFormer image-processor tests.

    Fixes over the previous revision (identifier mangling): the constructor's
    parameters were all one duplicated name (a SyntaxError) and its
    assignments were bound to a local instead of ``self`` (the dict method
    then raised AttributeError); the class and method names are restored to
    what the companion test class calls
    (``PoolFormerImageProcessingTester(...)`` /
    ``prepare_image_processor_dict()``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # Default dicts mirror the processor's own defaults.
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self) -> dict:
        """Return the kwargs dict used to build a PoolFormerImageProcessor."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }

    # Backward-compatible alias for the previously mangled method name.
    UpperCamelCase = prepare_image_processor_dict


# Backward-compatible alias for the previously mangled class name.
a__ = PoolFormerImageProcessingTester
@require_torch
@require_vision
class a__(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests PoolFormerImageProcessor over PIL, numpy and torch inputs.

    Fixes: the base class ``snake_case`` was an undefined name (the imported
    ``ImageProcessingSavingTestMixin`` is intended); ``setUp`` discarded the
    tester instead of storing it on ``self``; the ``prepare_image_inputs``
    calls passed the undefined name ``lowercase`` as flag values; every
    method shared one garbled name and shadowed the previous one.
    """

    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        # The property below reads self.image_processor_tester.
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        # Batch-feature behaviour is covered by the mixin; nothing extra here.
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 626
| 1
|
import comet # From: unbabel-comet
import torch
import datasets
# All four module constants were bound to the single name `_lowerCamelCase`,
# so the metric class below crashed with NameError on _DESCRIPTION /
# _KWARGS_DESCRIPTION / _CITATION. Restore the distinct names.
logger = datasets.logging.get_logger(__name__)

_CITATION = '\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = "{COMET}: A Neural Framework for {MT} Evaluation",\n author = "Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon",\n booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",\n month = nov,\n year = "2020",\n address = "Online",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",\n pages = "2685--2702",\n}\n'

_DESCRIPTION = '\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n'

_KWARGS_DESCRIPTION = '\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric(\'comet\')\n >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use\n >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]\n >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]\n >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results["scores"]])\n [0.19, 0.92]\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __A(datasets.Metric):
    """COMET machine-translation metric wrapper for `datasets`.

    Fixes: the three hook methods all shared one garbled name (so only the
    last survived); `_download_and_prepare` discarded the loaded model
    instead of assigning ``self.scorer``; `_compute` had duplicate parameter
    names (a SyntaxError) and discarded the predict results while reading
    ``mean_score``/``scores``.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        # Store the checkpoint on self: _compute reads self.scorer.
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        """Score candidate translations against sources and references."""
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 114
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Every constant here was bound to the single name `_lowerCamelCase`, while
# later code references MUSTC_LANGS, LANGUAGES, VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP and MAX_MODEL_INPUT_SIZES. Restore the names.
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]
LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """Speech2Text tokenizer backed by SentencePiece plus a JSON vocab.

    Fixes: the ``__init__`` signature reused one parameter name for every
    argument (a SyntaxError); every computed value (encoder, decoder,
    sp_model, ...) was discarded instead of being assigned to ``self``;
    all methods shared one garbled name so they shadowed each other, and
    ``@tgt_lang.setter`` referenced a property that did not exist under
    that name.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    # Language-code prefix prepended by build_inputs_with_special_tokens.
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset prefix_tokens to the language-code id of `tgt_lang`."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Decode sentencepiece tokens, keeping special tokens verbatim."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        if token_ids_a_a is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a_a + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_a, token_ids_a_a=None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_a_a is None:
            return prefix_ones + ([0] * len(token_ids_a)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_a)) + ([0] * len(token_ids_a_a)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # The SentencePiece processor is not picklable; reload it on setstate.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs) -> "sentencepiece.SentencePieceProcessor":
    """Build a SentencePiece processor and load the model at `path`.

    Renamed from the garbled ``__UpperCAmelCase`` (three module functions
    shared that name and shadowed each other); the tokenizer above calls
    ``load_spm``. The processor was also bound to a throwaway name while
    ``spm.Load`` was called on the undefined ``spm``.
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def load_json(path: str):
    """Read and return the JSON document at `path` (called by the tokenizer
    as ``load_json``; renamed from the shadowed garbled name)."""
    with open(path, "r") as f:
        return json.load(f)
def save_json(data, path: str) -> None:
    """Serialize `data` as indented JSON to `path` (called by
    ``save_vocabulary`` as ``save_json``; renamed from the shadowed
    garbled name)."""
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
| 114
| 1
|
"""simple docstring"""
from typing import List
import numpy as np
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = {key: len(UpperCamelCase_ ) for key, value in gen_kwargs.items() if isinstance(UpperCamelCase_ , UpperCamelCase_ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"""Sharding is ambiguous for this dataset: """
+ """we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"""
+ """\n""".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items() )
+ """\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, """
+ """and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."""
) )
__SCREAMING_SNAKE_CASE = max(lists_lengths.values() , default=0 )
return max(1 , UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = []
for group_idx in range(UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
__SCREAMING_SNAKE_CASE = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
__SCREAMING_SNAKE_CASE = range(UpperCamelCase_ , start + num_shards_to_add )
shards_indices_per_group.append(UpperCamelCase_ )
return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split `gen_kwargs` into at most `max_num_jobs` kwargs dicts, slicing
    every list value per shard group and copying scalars unchanged.

    The original had duplicate parameter names (a SyntaxError) and tested
    ``isinstance(value, value)`` instead of ``isinstance(value, list)``.
    """
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _lowerCAmelCase ( UpperCamelCase_ ):
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key] , UpperCamelCase_ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = {len(UpperCamelCase_ ) for value in gen_kwargs.values() if isinstance(UpperCamelCase_ , UpperCamelCase_ )}
__SCREAMING_SNAKE_CASE = {}
for size in list_sizes:
__SCREAMING_SNAKE_CASE = list(range(UpperCamelCase_ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
__SCREAMING_SNAKE_CASE = dict(UpperCamelCase_ )
for key, value in shuffled_kwargs.items():
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = [value[i] for i in indices_per_size[len(UpperCamelCase_ )]]
return shuffled_kwargs
| 248
|
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    """Logical AND of two bits: 1 only when neither input is 0.

    Renamed from the garbled ``_lowerCAmelCase`` (the self-test and the
    __main__ block call ``and_gate``); the original also reused one
    parameter name for both inputs, which is a SyntaxError.
    """
    return int((input_1, input_2).count(0) == 0)
def test_and_gate() -> None:
    """Exhaustive truth-table check for and_gate (called from __main__ as
    ``test_and_gate``; renamed from the shadowed garbled name)."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
    # Run the truth-table self-test, then print all four AND combinations.
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 248
| 1
|
"""simple docstring"""
def is_palindrome(num: int) -> bool:
    """True if the decimal digits of `num` read the same in both directions.

    Renamed from the garbled ``lowerCAmelCase_`` (three functions shared
    that name and shadowed each other; ``solution`` calls ``is_palindrome``).
    """
    return str(num) == str(num)[::-1]
def sum_reverse(num: int) -> int:
    """Return `num` plus its digit-reversed value (the reverse-and-add step
    of the Lychrel iteration); renamed from the shadowed garbled name."""
    return int(num) + int(str(num)[::-1])
def solution(limit: int = 10000) -> int:
    """Count Lychrel candidates below `limit` (Project Euler 55): numbers
    that do not produce a palindrome within 50 reverse-and-add iterations.

    Renamed to ``solution`` (the __main__ block calls it); the original
    also collapsed the accumulator, counter and candidate list onto one
    garbled name and iterated ``sum_reverse`` on the limit parameter
    instead of the running value.
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            # while-else: no palindrome found within 50 iterations.
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
    # Print the Lychrel-candidate count for the default limit (Project Euler 55).
    print(f"""{solution() = }""")
| 174
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# All four constants were bound to the single name `lowerCAmelCase`, while the
# tokenizer class below reads VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP and
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES. Restore the distinct names.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) Blenderbot tokenizer.

    Fixes: the base class ``A__`` was an undefined name (the imported
    ``PreTrainedTokenizerFast`` is intended); ``__init__`` reused one
    parameter name for every argument (a SyntaxError); every method shared
    one garbled name so only the last survived, and ``@mask_token.setter``
    referenced a property that did not exist under that name; computed
    pre-tokenizer/post-processor state updates were discarded instead of
    being written back.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Rebuild the pre-tokenizer if its add_prefix_space flag disagrees.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word that strips the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """All-zero token-type ids (Blenderbot does not use segment ids)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        # Blenderbot only appends EOS; a second sequence is ignored by design.
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(' ' + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = ' '.join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""")
        return input_ids
| 174
| 1
|
"""simple docstring"""
import numpy as np
def snake_case__(_lowerCamelCase) -> np.array:
    """Hyperbolic tangent activation, computed through its logistic form:
    tanh(x) = 2 * sigmoid(2x) - 1."""
    negative_exp = np.exp(-2 * _lowerCamelCase)
    return (2 / (1 + negative_exp)) - 1
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    import doctest
    doctest.testmod()
| 281
|
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
__A : Optional[int] = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
__A : List[Any] = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
__A : Optional[int] = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase__(datasets.Metric):
    """Spearman rank-order correlation metric for `datasets`.

    Fixes: both hook methods shared one garbled name (the second shadowed
    the first), and `_compute` discarded the scipy result while reading
    the undefined name ``results``.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Return the Spearman coefficient (and optionally its p-value)."""
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 281
| 1
|
"""simple docstring"""
from __future__ import annotations
def a__(snake_case__) -> int:
    """Maximum sum of non-adjacent elements of the list (house-robber).

    The original bound both running maxima to one garbled name and then
    took ``max`` of the input parameter with itself, which made the result
    meaningless. Public name and parameter are kept for compatibility.
    """
    if not snake_case__:
        return 0
    max_including = snake_case__[0]  # best sum that includes the current element
    max_excluding = 0                # best sum that excludes it
    for num in snake_case__[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    import doctest
    doctest.testmod()
| 543
|
"""simple docstring"""
def solution(limit: int = 50000000) -> int:
    """Project Euler 87: count numbers below `limit` expressible as a prime
    square plus a prime cube plus a prime fourth power.

    Renamed to ``solution`` (the __main__ block calls it); the original
    collapsed the result set, sieve bound, squares, cubes and fourth powers
    onto single garbled names, leaving ``ret`` and the loop values undefined.
    """
    ret = set()
    # Largest prime whose square can appear: p^2 <= limit - 2^3 - 2^4.
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers, plus 2.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                # 16 = 2**4 is the smallest possible fourth-power term.
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
    # Print the count for the default 50,000,000 limit (Project Euler 87).
    print(F"""{solution() = }""")
| 543
| 1
|
from __future__ import annotations
# Adjacency list for the BFS demo below. Renamed from the garbled
# `_lowerCAmelCase`: the __main__ block constructs `Graph(graph, "G")`.
graph = {
    """A""": ["""B""", """C""", """E"""],
    """B""": ["""A""", """D""", """E"""],
    """C""": ["""A""", """F""", """G"""],
    """D""": ["""B"""],
    """E""": ["""A""", """B""", """D"""],
    """F""": ["""C"""],
    """G""": ["""C"""],
}
class Graph:
    """Undirected/directed graph with breadth-first shortest-path queries
    from a fixed source vertex.

    Renamed from the garbled ``_UpperCAmelCase`` (the __main__ block
    constructs ``Graph``); the constructor and search also discarded every
    value (graph, parent map, visited set, queue) into throwaway names
    instead of assigning the attributes the methods read.
    """

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Populate self.parent with the BFS tree rooted at the source."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the "a->b->c" path from the source to `target_vertex`.

        Raises:
            ValueError: if no path was found by breath_first_search.
        """
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + F"""->{target_vertex}"""
if __name__ == "__main__":
    # NOTE(review): broken by the identifier rewrite — ``Graph`` and ``graph``
    # are undefined here (the class is now ``_UpperCAmelCase`` and the
    # adjacency dict ``_lowerCAmelCase``), the instance is bound to
    # ``_lowerCAmelCase`` instead of ``g``, and ``breath_first_search`` /
    # ``shortest_path`` no longer exist as method names. Running this block
    # raises NameError.
    _lowerCAmelCase = Graph(graph, """G""")
    g.breath_first_search()
    print(g.shortest_path("""D"""))
    print(g.shortest_path("""G"""))
    print(g.shortest_path("""Foo"""))
| 481
|
from typing import List
from .keymap import KEYMAP, get_character
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
def decorator(_lowerCAmelCase ):
A_ : List[Any] = getattr(_lowerCAmelCase ,"""handle_key""" ,[] )
handle += [key]
setattr(_lowerCAmelCase ,"""handle_key""" ,_lowerCAmelCase )
return func
return decorator
def _lowerCAmelCase ( *_lowerCAmelCase ):
'''simple docstring'''
def decorator(_lowerCAmelCase ):
A_ : Tuple = getattr(_lowerCAmelCase ,"""handle_key""" ,[] )
handle += keys
setattr(_lowerCAmelCase ,"""handle_key""" ,_lowerCAmelCase )
return func
return decorator
class _UpperCAmelCase ( _lowerCamelCase ):
    """Metaclass that collects methods tagged (via the decorators above) with
    ``handle_key`` into a per-class ``key_handler`` dispatch dict, and installs
    a ``handle_input`` entry point that routes a pressed key to its handler.

    NOTE(review): mangled by an identifier rewrite; as written it cannot run:
    * the base ``_lowerCamelCase`` is undefined (a metaclass normally derives
      from ``type``), and ``__new__`` declares three parameters all named
      ``a__`` — a SyntaxError;
    * ``KeyHandler``, ``attrs``, ``handled_keys``, ``new_cls``, ``char`` and
      ``handler`` no longer match any surviving binding.
    """
    def __new__( cls , a__ , a__ , a__ ):
        A_ : Any = super().__new__(cls , a__ , a__ , a__ )
        if not hasattr(a__ , """key_handler""" ):
            # Fresh dispatch table for this class rather than the parent's.
            setattr(a__ , """key_handler""" , {} )
            setattr(a__ , """handle_input""" , KeyHandler.handle_input )
        for value in attrs.values():
            # Register every attribute tagged by the key decorators above.
            A_ : Tuple = getattr(a__ , """handle_key""" , [] )
            for key in handled_keys:
                A_ : Optional[Any] = value
        return new_cls
    @staticmethod
    def _lowerCamelCase ( cls ):
        # Read one keypress and dispatch it through ``cls.key_handler``;
        # returns the handler's result, or None for unmapped keys.
        A_ : List[str] = get_character()
        if char != KEYMAP["undefined"]:
            A_ : str = ord(a__ )
            A_ : List[str] = cls.key_handler.get(a__ )
            if handler:
                A_ : Optional[int] = char
                return handler(cls )
            else:
                return None
def _lowerCAmelCase ( cls ):
    """Rebuild ``cls`` through the key-handler metaclass above so its tagged
    methods are registered for input dispatch.

    NOTE(review): ``KeyHandler`` is undefined here — the metaclass is now
    named ``_UpperCAmelCase`` — so calling this raises NameError.
    """
    return KeyHandler(cls.__name__ ,cls.__bases__ ,cls.__dict__.copy() )
| 481
| 1
|
"""simple docstring"""
import argparse
import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Dict:
    """Convert an original CompVis latent-diffusion checkpoint into a diffusers
    ``LDMPipeline`` (VQModel + UNetLDMModel + DDIMScheduler) and save it.

    NOTE(review): mangled by an identifier rewrite and not runnable as-is:
    * all three parameters share the name ``__lowerCamelCase`` — a
      SyntaxError (intended order per the CLI block below: checkpoint_path,
      config_path, output_path);
    * the assignment targets were lost — ``config``, ``state_dict``,
      ``keys``, ``vqvae``, ``unet``, ``pipeline`` etc. are undefined, and the
      two ``state_dict[key]`` copies presumably populated the VQVAE / UNet
      sub-dicts keyed on ``key`` with the prefix stripped — verify against
      the upstream conversion script.
    """
    lowercase__ : Optional[int] = OmegaConf.load(__lowerCamelCase )
    lowercase__ : List[str] = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model''']
    lowercase__ : List[str] = list(state_dict.keys() )
    # extract state_dict for VQVAE
    lowercase__ : Optional[int] = {}
    lowercase__ : List[str] = '''first_stage_model.'''
    for key in keys:
        if key.startswith(__lowerCamelCase ):
            lowercase__ : Tuple = state_dict[key]
    # extract state_dict for UNetLDM
    lowercase__ : List[Any] = {}
    lowercase__ : Optional[Any] = '''model.diffusion_model.'''
    for key in keys:
        if key.startswith(__lowerCamelCase ):
            lowercase__ : Dict = state_dict[key]
    # Model hyper-parameters come from the OmegaConf training config.
    lowercase__ : Tuple = config.model.params.first_stage_config.params
    lowercase__ : Optional[int] = config.model.params.unet_config.params
    lowercase__ : Optional[Any] = VQModel(**__lowerCamelCase ).eval()
    vqvae.load_state_dict(__lowerCamelCase )
    lowercase__ : Tuple = UNetLDMModel(**__lowerCamelCase ).eval()
    unet.load_state_dict(__lowerCamelCase )
    lowercase__ : Tuple = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=__lowerCamelCase , )
    lowercase__ : List[str] = LDMPipeline(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    pipeline.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
    # CLI wrapper for the checkpoint converter defined above.
    # NOTE(review): ``parser``/``args`` bindings were replaced with the
    # throwaway name ``lowerCAmelCase_`` and ``convert_ldm_original`` no
    # longer exists (the function was renamed), so this block raises
    # NameError when executed.
    lowerCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', type=str, required=True)
    parser.add_argument('--config_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    lowerCAmelCase_ = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 560
|
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
# NOTE(review): the four assignments below all bind the throwaway name
# ``lowerCAmelCase_`` — upstream they patched ``data_utils.Vocab`` /
# ``data_utils.Corpus`` and registered ``data_utils``/``vocabulary`` in
# ``sys.modules`` so legacy pickles resolve; confirm before relying on this.
lowerCAmelCase_ = data_utils.TransfoXLTokenizer
lowerCAmelCase_ = data_utils.TransfoXLCorpus
lowerCAmelCase_ = data_utils
lowerCAmelCase_ = data_utils
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
    """Convert a Transformer-XL TensorFlow checkpoint and/or pre-processed
    corpus into PyTorch/``transformers`` format and save the results.

    NOTE(review): mangled by an identifier rewrite and not runnable as-is:
    * all four parameters share the name ``__lowerCamelCase`` — a SyntaxError
      (intended order per the CLI block below: tf_checkpoint_path,
      transfo_xl_config_file, pytorch_dump_folder_path,
      transfo_xl_dataset_file);
    * the assignment targets were lost — ``corpus``, ``config``, ``model``
      and the various path variables are undefined.
    """
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(__lowerCamelCase , '''rb''' ) as fp:
            lowercase__ : Dict = pickle.load(__lowerCamelCase , encoding='''latin1''' )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        lowercase__ : int = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
        print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" )
        lowercase__ : List[Any] = corpus.vocab.__dict__
        torch.save(__lowerCamelCase , __lowerCamelCase )
        lowercase__ : int = corpus.__dict__
        corpus_dict_no_vocab.pop('''vocab''' , __lowerCamelCase )
        lowercase__ : List[str] = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
        print(f"""Save dataset to {pytorch_dataset_dump_path}""" )
        torch.save(__lowerCamelCase , __lowerCamelCase )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        lowercase__ : Tuple = os.path.abspath(__lowerCamelCase )
        lowercase__ : List[Any] = os.path.abspath(__lowerCamelCase )
        print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            lowercase__ : Tuple = TransfoXLConfig()
        else:
            lowercase__ : List[str] = TransfoXLConfig.from_json_file(__lowerCamelCase )
        print(f"""Building PyTorch model from configuration: {config}""" )
        lowercase__ : Union[str, Any] = TransfoXLLMHeadModel(__lowerCamelCase )
        lowercase__ : List[Any] = load_tf_weights_in_transfo_xl(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
        # Save pytorch-model
        lowercase__ : Optional[int] = os.path.join(__lowerCamelCase , __lowerCamelCase )
        lowercase__ : Dict = os.path.join(__lowerCamelCase , __lowerCamelCase )
        print(f"""Save PyTorch model to {os.path.abspath(__lowerCamelCase )}""" )
        torch.save(model.state_dict() , __lowerCamelCase )
        print(f"""Save configuration file to {os.path.abspath(__lowerCamelCase )}""" )
        with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    # CLI wrapper for the Transformer-XL converter defined above.
    # NOTE(review): ``parser``/``args`` bindings were replaced with the
    # throwaway name ``lowerCAmelCase_`` and
    # ``convert_transfo_xl_checkpoint_to_pytorch`` no longer exists (the
    # function was renamed), so this block raises NameError when executed.
    lowerCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the folder to store the PyTorch model or dataset/vocab.',
    )
    parser.add_argument(
        '--tf_checkpoint_path',
        default='',
        type=str,
        help='An optional path to a TensorFlow checkpoint path to be converted.',
    )
    parser.add_argument(
        '--transfo_xl_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained BERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--transfo_xl_dataset_file',
        default='',
        type=str,
        help='An optional dataset file to be converted in a vocabulary.',
    )
    lowerCAmelCase_ = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
| 560
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowercase : Union[str, Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
def __init__( self : Tuple, *lowerCamelCase : str, **lowerCamelCase : Any )-> None:
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''', UpperCamelCase_, )
super().__init__(*UpperCamelCase_, **UpperCamelCase_ )
| 704
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
    """Output container for the Flax ControlNet below: the per-resolution
    down-block residual samples and the mid-block residual sample.

    NOTE(review): the two field declarations were reduced to ``_a = 42`` by
    an identifier rewrite (the second shadows the first) and the base class
    name was lost; upstream these are ``down_block_res_samples: jnp.ndarray``
    and ``mid_block_res_sample: jnp.ndarray`` on a ``BaseOutput`` subclass —
    restore before use.
    """
    _a = 42
    _a = 42
class __SCREAMING_SNAKE_CASE ( nn.Module ):
    """Flax conditioning-embedding network: maps the conditioning image
    through a conv stem, a stack of strided conv blocks, and a zero-initialised
    output conv (so the ControlNet starts as an identity perturbation).

    NOTE(review): mangled by an identifier rewrite — the three config fields
    are all named ``_a`` (so only the last survives), ``setup`` reads
    ``self.block_out_channels``/``self.conditioning_embedding_channels`` which
    no longer exist, and locals such as ``blocks`` lost their bindings.
    Presumed upstream fields: conditioning_embedding_channels,
    block_out_channels=(16, 32, 96, 256), dtype=jnp.float32 — verify.
    """
    _a = 42
    _a = (1_6, 3_2, 9_6, 2_5_6)
    _a = jnp.floataa
    def snake_case ( self : Tuple )-> int:
        # Stem conv keeping the first channel width.
        lowerCamelCase__ : Tuple =nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        lowerCamelCase__ : Dict =[]
        for i in range(len(self.block_out_channels ) - 1 ):
            # One same-width conv followed by a stride-2 conv to the next width.
            lowerCamelCase__ : Dict =self.block_out_channels[i]
            lowerCamelCase__ : Dict =self.block_out_channels[i + 1]
            lowerCamelCase__ : List[str] =nn.Conv(
                lowerCamelCase, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(lowerCamelCase )
            lowerCamelCase__ : Optional[int] =nn.Conv(
                lowerCamelCase, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
            blocks.append(lowerCamelCase )
        lowerCamelCase__ : Any =blocks
        # Zero-initialised output conv: the embedding starts as all zeros.
        lowerCamelCase__ : Optional[int] =nn.Conv(
            self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__( self : Any, lowerCamelCase : int )-> List[str]:
        # stem -> silu -> (conv -> silu)* -> zero conv
        lowerCamelCase__ : Tuple =self.conv_in(lowerCamelCase )
        lowerCamelCase__ : Dict =nn.silu(lowerCamelCase )
        for block in self.blocks:
            lowerCamelCase__ : str =block(lowerCamelCase )
            lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase )
        lowerCamelCase__ : Any =self.conv_out(lowerCamelCase )
        return embedding
@flax_register_to_config
class __SCREAMING_SNAKE_CASE ( nn.Module , lowerCAmelCase_ , lowerCAmelCase_ ):
    """Flax ControlNet: a UNet encoder copy whose conditioning image is fused
    into the noisy sample, producing per-down-block and mid-block residuals
    (scaled by ``conditioning_scale``) for injection into a diffusion UNet.

    NOTE(review): mangled by an identifier rewrite — every config field below
    is named ``_a`` (only the last survives), the mixin base names were lost,
    and most local/attribute bindings in ``setup``/``__call__`` no longer
    resolve. The per-field comments give the presumed upstream names — verify
    against the diffusers FlaxControlNetModel before use.
    """
    _a = 3_2          # presumed: sample_size
    _a = 4            # presumed: in_channels
    _a = (             # presumed: down_block_types
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    _a = False        # presumed: only_cross_attention
    _a = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)  # presumed: block_out_channels
    _a = 2            # presumed: layers_per_block
    _a = 8            # presumed: attention_head_dim
    _a = None         # presumed: num_attention_heads
    _a = 1_2_8_0      # presumed: cross_attention_dim
    _a = 0.0          # presumed: dropout
    _a = False        # presumed: use_linear_projection
    _a = jnp.floataa  # presumed: dtype (jnp.float32)
    _a = True         # presumed: flip_sin_to_cos
    _a = 0            # presumed: freq_shift
    _a = "rgb"        # presumed: controlnet_conditioning_channel_order
    _a = (1_6, 3_2, 9_6, 2_5_6)  # presumed: conditioning_embedding_out_channels
    def snake_case ( self : str, lowerCamelCase : jax.random.KeyArray )-> FrozenDict:
        # init input tensors
        # Build zero/one dummy inputs of the configured shapes and run
        # ``self.init`` to obtain the parameter FrozenDict.
        lowerCamelCase__ : int =(1, self.in_channels, self.sample_size, self.sample_size)
        lowerCamelCase__ : int =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
        lowerCamelCase__ : Union[str, Any] =jnp.ones((1,), dtype=jnp.intaa )
        lowerCamelCase__ : str =jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa )
        # Conditioning image is 8x the latent resolution (VAE downscale factor).
        lowerCamelCase__ : Any =(1, 3, self.sample_size * 8, self.sample_size * 8)
        lowerCamelCase__ : Optional[Any] =jnp.zeros(lowerCamelCase, dtype=jnp.floataa )
        lowerCamelCase__ , lowerCamelCase__ : List[Any] =jax.random.split(lowerCamelCase )
        lowerCamelCase__ : Dict ={'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )["params"]
    def snake_case ( self : Any )-> Tuple:
        # Module construction: input conv, time embedding, conditioning
        # embedder, encoder down blocks plus zero-initialised controlnet convs,
        # and the mid block with its zero-initialised output conv.
        lowerCamelCase__ : Optional[int] =self.block_out_channels
        lowerCamelCase__ : Tuple =block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        lowerCamelCase__ : List[Any] =self.num_attention_heads or self.attention_head_dim
        # input
        lowerCamelCase__ : int =nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        # time
        lowerCamelCase__ : str =FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
        lowerCamelCase__ : Dict =FlaxTimestepEmbedding(lowerCamelCase, dtype=self.dtype )
        lowerCamelCase__ : List[Any] =FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
        lowerCamelCase__ : Dict =self.only_cross_attention
        if isinstance(lowerCamelCase, lowerCamelCase ):
            lowerCamelCase__ : int =(only_cross_attention,) * len(self.down_block_types )
        if isinstance(lowerCamelCase, lowerCamelCase ):
            lowerCamelCase__ : List[str] =(num_attention_heads,) * len(self.down_block_types )
        # down
        lowerCamelCase__ : Union[str, Any] =[]
        lowerCamelCase__ : Dict =[]
        lowerCamelCase__ : List[Any] =block_out_channels[0]
        # Zero-initialised 1x1 convs so the controlnet residuals start at zero.
        lowerCamelCase__ : List[Any] =nn.Conv(
            lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
        controlnet_down_blocks.append(lowerCamelCase )
        for i, down_block_type in enumerate(self.down_block_types ):
            lowerCamelCase__ : List[Any] =output_channel
            lowerCamelCase__ : str =block_out_channels[i]
            lowerCamelCase__ : Dict =i == len(lowerCamelCase ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                lowerCamelCase__ : str =FlaxCrossAttnDownBlockaD(
                    in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
            else:
                lowerCamelCase__ : List[Any] =FlaxDownBlockaD(
                    in_channels=lowerCamelCase, out_channels=lowerCamelCase, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(lowerCamelCase )
            for _ in range(self.layers_per_block ):
                lowerCamelCase__ : Any =nn.Conv(
                    lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(lowerCamelCase )
            if not is_final_block:
                lowerCamelCase__ : Any =nn.Conv(
                    lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
                controlnet_down_blocks.append(lowerCamelCase )
        lowerCamelCase__ : int =down_blocks
        lowerCamelCase__ : List[str] =controlnet_down_blocks
        # mid
        lowerCamelCase__ : Tuple =block_out_channels[-1]
        lowerCamelCase__ : List[Any] =FlaxUNetMidBlockaDCrossAttn(
            in_channels=lowerCamelCase, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
        lowerCamelCase__ : List[str] =nn.Conv(
            lowerCamelCase, kernel_size=(1, 1), padding='''VALID''', kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
    def __call__( self : int, lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : float = 1.0, lowerCamelCase : bool = True, lowerCamelCase : bool = False, )-> Union[FlaxControlNetOutput, Tuple]:
        # Forward pass: (sample, timesteps, encoder_hidden_states,
        # controlnet_cond, conditioning_scale, return_dict, train) — presumed
        # upstream order; all seven parameters were renamed ``lowerCamelCase``.
        lowerCamelCase__ : int =self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            # Flip the channel axis when the checkpoint was trained on BGR input.
            lowerCamelCase__ : int =jnp.flip(lowerCamelCase, axis=1 )
        # 1. time
        if not isinstance(lowerCamelCase, jnp.ndarray ):
            lowerCamelCase__ : Any =jnp.array([timesteps], dtype=jnp.intaa )
        elif isinstance(lowerCamelCase, jnp.ndarray ) and len(timesteps.shape ) == 0:
            lowerCamelCase__ : List[str] =timesteps.astype(dtype=jnp.floataa )
            lowerCamelCase__ : int =jnp.expand_dims(lowerCamelCase, 0 )
        lowerCamelCase__ : Optional[Any] =self.time_proj(lowerCamelCase )
        lowerCamelCase__ : Optional[Any] =self.time_embedding(lowerCamelCase )
        # 2. pre-process
        # Flax convs are NHWC: transpose from NCHW before the conv stem.
        lowerCamelCase__ : Optional[int] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
        lowerCamelCase__ : Dict =self.conv_in(lowerCamelCase )
        lowerCamelCase__ : List[str] =jnp.transpose(lowerCamelCase, (0, 2, 3, 1) )
        lowerCamelCase__ : int =self.controlnet_cond_embedding(lowerCamelCase )
        sample += controlnet_cond
        # 3. down
        lowerCamelCase__ : Union[str, Any] =(sample,)
        for down_block in self.down_blocks:
            if isinstance(lowerCamelCase, lowerCamelCase ):
                lowerCamelCase__ , lowerCamelCase__ : Dict =down_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
            else:
                lowerCamelCase__ , lowerCamelCase__ : Tuple =down_block(lowerCamelCase, lowerCamelCase, deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        lowerCamelCase__ : Optional[int] =self.mid_block(lowerCamelCase, lowerCamelCase, lowerCamelCase, deterministic=not train )
        # 5. contronet blocks
        lowerCamelCase__ : Optional[Any] =()
        for down_block_res_sample, controlnet_block in zip(lowerCamelCase, self.controlnet_down_blocks ):
            lowerCamelCase__ : Union[str, Any] =controlnet_block(lowerCamelCase )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        lowerCamelCase__ : List[str] =controlnet_down_block_res_samples
        lowerCamelCase__ : List[str] =self.controlnet_mid_block(lowerCamelCase )
        # 6. scaling
        lowerCamelCase__ : Union[str, Any] =[sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=lowerCamelCase, mid_block_res_sample=lowerCamelCase )
| 625
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _snake_case ( unittest.TestCase ):
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=18 , _lowerCamelCase=30 , _lowerCamelCase=400 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , ):
a :Tuple = size if size is not None else {'''height''': 18, '''width''': 18}
a :int = parent
a :Optional[int] = batch_size
a :str = num_channels
a :str = image_size
a :Dict = min_resolution
a :List[str] = max_resolution
a :Optional[Any] = do_resize
a :str = size
a :Optional[Any] = apply_ocr
def SCREAMING_SNAKE_CASE__ ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _snake_case ( _snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Build the shared tester fixture (originally ``setUp``).
        # NOTE(review): every method in this class shares the obfuscated name
        # ``SCREAMING_SNAKE_CASE__``, so only the last definition survives.
        # Here the assignment target was also lost (presumably
        # ``self.image_processor_tester``) and ``LayoutLMvaImageProcessingTester``
        # was renamed ``_snake_case`` above — this raises NameError as-is.
        a :Optional[int] = LayoutLMvaImageProcessingTester(self )
    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Kwargs dict for constructing the processor under test.
        # NOTE(review): ``prepare_image_processor_dict`` was renamed during
        # the identifier rewrite — verify against the tester class above.
        return self.image_processor_tester.prepare_image_processor_dict()
    def SCREAMING_SNAKE_CASE__ ( self ):
        # The processor must expose its three config attributes.
        # NOTE(review): ``_lowerCamelCase`` is undefined — it was presumably
        # the local image processor bound on the first line — NameError as-is.
        a :Any = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowerCamelCase , '''do_resize''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''size''' ) )
        self.assertTrue(hasattr(_lowerCamelCase , '''apply_ocr''' ) )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # ``from_dict`` must honour both the serialized size and an override.
        # NOTE(review): the local processor bindings were lost in the
        # identifier rewrite — ``image_processor`` is undefined as-is.
        a :List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
        a :str = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Intentionally empty placeholder (upstream skips this common test).
        pass
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Round-trip PIL images through the processor, unbatched and batched,
        # checking output tensor shapes and the OCR words/boxes outputs.
        # NOTE(review): ``_lowerCamelCase`` and the local bindings
        # (``image_inputs``, ``encoding``, ``encoded_images``) were lost in
        # the identifier rewrite — this method raises NameError as-is.
        # Initialize image_processing
        a :Any = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        a :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase , Image.Image )
        # Test not batched input
        a :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        self.assertIsInstance(encoding.words , _lowerCamelCase )
        self.assertIsInstance(encoding.boxes , _lowerCamelCase )
        # Test batched
        a :Tuple = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Same round-trip as the PIL test, but with numpy-array inputs.
        # NOTE(review): ``_lowerCamelCase`` and the local bindings were lost
        # in the identifier rewrite — this method raises NameError as-is.
        # Initialize image_processing
        a :int = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        a :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase , np.ndarray )
        # Test not batched input
        a :Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        a :Optional[int] = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Same round-trip as the PIL test, but with torch-tensor inputs.
        # NOTE(review): ``_lowerCamelCase`` and the local bindings were lost
        # in the identifier rewrite — this method raises NameError as-is.
        # Initialize image_processing
        a :int = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        a :Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_lowerCamelCase , torch.Tensor )
        # Test not batched input
        a :Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
        # Test batched
        a :int = image_processing(_lowerCamelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['''height'''],
                self.image_processor_tester.size['''width'''],
            ) , )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Integration test against a real document image: with apply_ocr=True
        # the processor must return the exact Tesseract 4.1.1 words/boxes
        # below; with apply_ocr=False only pixel values are produced.
        # NOTE(review): ``_lowerCamelCase``, ``ds``, ``image``, ``encoding``
        # and the expected words/boxes bindings were lost in the identifier
        # rewrite — this method raises NameError as-is.
        # with apply_OCR = True
        a :List[str] = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        a :Tuple = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
        a :Tuple = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
        a :List[str] = image_processing(_lowerCamelCase , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        a :Union[str, Any] = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', 
        '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
        a :int = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 
        447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , _lowerCamelCase )
        self.assertListEqual(encoding.boxes , _lowerCamelCase )
        # with apply_OCR = False
        a :Optional[int] = LayoutLMvaImageProcessor(apply_ocr=_lowerCamelCase )
        a :Tuple = image_processing(_lowerCamelCase , return_tensors='''pt''' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 445
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
# Module-level logger. The invalid annotation (`Any` is not imported) is
# dropped, and `logger` is bound because method bodies below refer to it.
snake_case = logging.get_logger(__name__)
logger = snake_case
# File names the tokenizer expects inside a checkpoint directory.
# (The original bound all three constants to one mangled name, so the
# later assignments clobbered the earlier ones; upstream names restored.)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

# Download URLs of the vocab/merges/tokenizer files for each published checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
        '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
        '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
        '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
        '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
        '''roberta-large-openai-detector''': (
            '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
        ),
    },
    '''merges_file''': {
        '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
        '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
        '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
        '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
        '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
        '''roberta-large-openai-detector''': (
            '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
        '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
        '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
        '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
        '''roberta-base-openai-detector''': (
            '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
        ),
        '''roberta-large-openai-detector''': (
            '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
        ),
    },
}

# Maximum sequence length (positional-embedding size) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''roberta-base''': 512,
    '''roberta-large''': 512,
    '''roberta-large-mnli''': 512,
    '''distilroberta-base''': 512,
    '''roberta-base-openai-detector''': 512,
    '''roberta-large-openai-detector''': 512,
}
class _snake_case ( PreTrainedTokenizerFast ):
    """Fast (Rust-backed) RoBERTa tokenizer: byte-level BPE with configurable
    ``add_prefix_space`` / ``trim_offsets`` behaviour.

    The mangled original reused one identifier for every class attribute,
    method and parameter (duplicate parameter names are a SyntaxError), and
    its base expression referred to the class's own undefined name; upstream
    identifiers are restored so the overrides take effect.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        """Build the fast tokenizer and reconcile the backend pre-tokenizer and
        post-processor state with ``add_prefix_space`` / ``trim_offsets``."""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Sync the backend pre-tokenizer's prefix-space flag with the argument.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('''type'''))
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = '''post_processor'''
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['''sep'''] = tuple(state['''sep'''])
            if "cls" in state:
                state['''cls'''] = tuple(state['''cls'''])

            changes_to_apply = False
            if state.get('''add_prefix_space''', add_prefix_space) != add_prefix_space:
                state['''add_prefix_space'''] = add_prefix_space
                changes_to_apply = True
            if state.get('''trim_offsets''', trim_offsets) != trim_offsets:
                state['''trim_offsets'''] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('''type'''))
                new_component = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_component)

    @property
    def mask_token(self):
        """The mask token as a string, or None (with an error log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # lstrip so `<mask>` consumes the preceding space like a normal word.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get('''is_split_into_words''', False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Persist the BPE model files; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_b=None):
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_b + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_b=None):
        """RoBERTa does not use token type ids: return zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_b + sep) * [0]
| 445
| 1
|
from __future__ import annotations
from functools import lru_cache
from math import ceil
# Sieve of Eratosthenes over the odd numbers below NUM_PRIMES; the resulting
# ``primes`` set is consumed by the partition helper below.  (The mangled
# version referenced NUM_PRIMES/primes without ever binding them.)
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    # Composite multiples of an already-eliminated prime are gone; remove
    # this prime's multiples starting at its square.
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100)
def lowerCAmelCase(number_to_partition: int) -> set[int]:
    """Return the set of prime partitions of *number_to_partition*, each
    encoded as the product of its parts (1 encodes the empty partition of 0).

    Relies on the module-level ``primes`` sieve defined above.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret = set()
    for prime in primes:
        if prime > number_to_partition:
            continue
        # Extend every partition of the remainder with this prime.
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret


# The recursive call above and the solver below refer to the helper by this
# (upstream) name; keep them resolvable.
partition = lowerCAmelCase
def lowerCAmelCase(__lowerCamelCase=5000):
    """Return the first integer expressible as a sum of primes in more than
    *__lowerCamelCase* distinct ways (Project Euler 77), or None if not found.

    The mangled original compared against an undefined
    ``number_unique_partitions``; the threshold is the parameter itself.
    """
    for number_to_partition in range(1, __lowerCamelCase):
        # ``partition`` is the memoized prime-partition helper defined above.
        if len(partition(number_to_partition)) > __lowerCamelCase:
            return number_to_partition
    return None
if __name__ == "__main__":
    # The solver in this file is (mangled-)named ``lowerCAmelCase``; calling
    # it directly avoids the NameError the old ``solution()`` reference raised.
    print(F'''{lowerCAmelCase() = }''')
| 246
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class a__ ( TaskTemplate ):
    """Task template for extractive question answering (SQuAD-style schema).

    The mangled original froze the dataclass with an undefined name, derived
    from an undefined base, and collapsed every field onto one identifier so
    later annotations clobbered earlier ones; the property below references
    ``question_column``/``context_column``/``answers_column``, so those field
    names are restored (base restored to the imported ``TaskTemplate``).
    """

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='question-answering-extractive', metadata={'include_in_asdict_even_if_is_default': True})
    # Expected input columns of the dataset.
    input_schema: ClassVar[Features] = Features({'question': Value('string'), 'context': Value('string')})
    # Expected label column: list of answer texts with character start offsets.
    label_schema: ClassVar[Features] = Features(
        {
            'answers': Sequence(
                {
                    'text': Value('string'),
                    'answer_start': Value('int32'),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def __SCREAMING_SNAKE_CASE ( self ) -> Dict[str, str]:
        # Map the (possibly user-renamed) dataset columns back onto the
        # canonical column names the task expects.
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 246
| 1
|
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants for the example.  NOTE(review): the mangled names
# collide, so the second assignment overwrites the first — upstream these are
# two distinct constants (presumably MAX_GPU_BATCH_SIZE and EVAL_BATCH_SIZE;
# confirm against the original script).  The invalid typing annotations
# (typing is not imported here) are dropped.
UpperCAmelCase = 16
UpperCAmelCase = 32
def lowerCamelCase(accelerator: Accelerator, batch_size: int = 16):
    """Build GLUE/MRPC train and eval dataloaders.

    The mangled original declared two parameters with the same name (a
    SyntaxError) and referenced an undefined ``datasets`` local; upstream
    names are restored.

    Args:
        accelerator: the ``Accelerator`` driving the run (used for
            ``main_process_first`` and padding decisions).
        batch_size: per-device batch size for both splits.

    Returns:
        ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    datasets = load_dataset("""glue""", """mrpc""")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["""idx""", """sentence1""", """sentence2"""],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="""longest""",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="""pt""",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


# Call sites in this file refer to the builder by this (upstream) name.
get_dataloaders = lowerCamelCase
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # NOTE(review): upstream this shadows the dataloader builder
    # (``get_dataloaders = mocked_dataloaders``); the mangled target name
    # below leaves the real builder in place — confirm against the original.
    # (Invalid ``Dict`` annotation dropped: typing is not imported here.)
    UpperCAmelCase = mocked_dataloaders  # noqa: F811
def lowerCamelCase(config, args):
    """Train bert-base-cased on GLUE/MRPC with gradient accumulation.

    The mangled original declared two parameters with the same name (a
    SyntaxError) and collapsed every local onto one identifier; upstream
    names are restored.

    Args:
        config: dict with ``lr``, ``num_epochs``, ``seed`` and ``batch_size``.
        args: parsed CLI namespace with ``cpu``, ``mixed_precision`` and
            ``gradient_accumulation_steps``.
    """
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
        config["""num_epochs"""] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"""
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    metric = evaluate.load("""glue""", """mrpc""")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'''epoch {epoch}:''', eval_metric)


# The CLI entry point below calls the trainer by this (upstream) name.
training_function = lowerCamelCase
def lowerCamelCase():
    """CLI entry point: parse arguments, assemble hyper-parameters, train.

    The mangled original invoked ``training_function`` with two undefined
    module-level names; the parsed ``config``/``args`` locals are passed.
    """
    parser = argparse.ArgumentParser(description="""Simple example of training script.""")
    parser.add_argument(
        """--mixed_precision""",
        type=str,
        default=None,
        choices=["""no""", """fp16""", """bf16""", """fp8"""],
        help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""",
    )
    # New Code #
    parser.add_argument(
        """--gradient_accumulation_steps""",
        type=int,
        default=1,
        help="""The number of minibatches to be ran before gradients are accumulated.""",
    )
    parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""")
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config, args)


# The __main__ guard below calls the entry point by this (upstream) name.
main = lowerCamelCase
if __name__ == "__main__":
    # Standard CLI entry point.  NOTE(review): ``main`` must be bound to the
    # argument-parsing driver above (mangled there as ``lowerCamelCase``) —
    # as written the name is unresolved; confirm against the original file.
    main()
| 139
|
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def lowerCamelCase(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Per-process waiting times under preemptive SRTF (shortest remaining
    time first) scheduling.

    The mangled original declared three parameters with the same name (a
    SyntaxError) and referenced an undefined ``finar``; upstream names are
    restored and the waiting-time expression recombined.

    Args:
        arrival_time: arrival instant of each process.
        burst_time: CPU burst length of each process.
        no_of_processes: number of processes.

    Returns:
        Waiting time of each process (clamped at 0).
    """
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999  # sentinel "infinity" for the running minimum
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        # Pick the arrived process with the shortest remaining time.
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            # Nothing runnable yet: idle one tick.
            increment_time += 1
            continue

        # Run the selected process for one tick.
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time = finish - arrival - burst, floored at 0.
            waiting_time[short] = finish_time - arrival_time[short] - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1

    return waiting_time
def lowerCamelCase(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turn-around time of each process: burst time plus waiting time.

    (Parameter names restored — the mangled original declared three
    parameters with the same name, which is a SyntaxError.  The order matches
    the driver's ``calculate_turnaroundtime(bt, n, wt)`` call.)
    """
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def lowerCamelCase(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting time and average turn-around time.

    (Parameter names restored — the mangled original declared three
    parameters with the same name, which is a SyntaxError.)
    """
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''')
    print("""Average turn around time =""", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    # Interactive driver: reads process count and per-process (arrival, burst)
    # pairs from stdin, then prints averages and a summary DataFrame.
    # NOTE(review): the mangled assignment targets below (everything bound to
    # ``UpperCAmelCase``) shadow one another, so the names consumed further
    # down — ``no_of_processes``, ``arrival_time``, ``burst_time``,
    # ``processes``, ``bt``/``n``/``wt``, ``waiting_time``,
    # ``turn_around_time`` and ``fcfs`` — are undefined as written; restore
    # the upstream names before running this driver.
    print('Enter how many process you want to analyze')
    UpperCAmelCase : Optional[Any] = int(input())
    UpperCAmelCase : Any = [0] * no_of_processes
    UpperCAmelCase : Tuple = [0] * no_of_processes
    UpperCAmelCase : Tuple = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print('Enter the arrival time and burst time for process:--' + str(i + 1))
        UpperCAmelCase , UpperCAmelCase : Tuple = map(int, input().split())
    UpperCAmelCase : List[str] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    UpperCAmelCase : Dict = burst_time
    UpperCAmelCase : Any = no_of_processes
    UpperCAmelCase : int = waiting_time
    UpperCAmelCase : Union[str, Any] = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    UpperCAmelCase : List[Any] = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            'Process',
            'BurstTime',
            'ArrivalTime',
            'WaitingTime',
            'TurnAroundTime',
        ],
    )
    # Printing the dataFrame
    pd.set_option('display.max_rows', fcfs.shape[0] + 1)
    print(fcfs)
| 139
| 1
|
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
a : Union[str, Any] = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def lowercase__(A ) ->Optional[int]:
    """Register diffusers' shared pytest command-line options on the parser *A*."""
    # Imported lazily so merely loading this conftest does not require diffusers.
    from diffusers.utils.testing_utils import pytest_addoption_shared as _register_shared_options

    _register_shared_options(A)
def lowercase__(A ) ->List[Any]:
    """pytest terminal-summary hook body: when ``--make-reports`` was passed,
    write the detailed report files via diffusers' shared helper.

    Fixes the mangled original, which read the option from an undefined
    ``terminalreporter`` name and passed the reporter itself as ``id``.

    Args:
        A: the pytest terminal reporter (owns the session config).
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = A.config.getoption("--make-reports")
    if make_reports:
        # ``id`` names the report set after the value given on the CLI.
        pytest_terminal_summary_main(A, id=make_reports)
| 85
|
"""simple docstring"""
from ....utils import logging
# Module logger.  (Invalid ``List[str]`` annotation dropped: typing is not
# imported in this module, so evaluating it at import time raises NameError.)
a = logging.get_logger(__name__)
class __UpperCAmelCase( object ):
    # NOTE(review): the original base expression (``SCREAMING_SNAKE_CASE__``)
    # is undefined in this module; upstream this config class has no custom
    # base — confirm before relying on inheritance here.
    """Wraps an existing config object, adopting all of its attributes and
    adding multimodal-specific fields."""

    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ):
        """Copy *config*'s attributes onto this object.

        The mangled original declared three parameters with the same name
        (a SyntaxError) and assigned into throwaway locals; upstream
        parameter names and ``self`` attribute targets are restored.

        Args:
            config: existing configuration object to wrap.
            num_labels: optional classifier head size (set only if truthy).
            modal_hidden_size: hidden size of the modal encoder output.
        """
        # Adopt every attribute of the wrapped config wholesale.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 85
| 1
|
def __snake_case ( lowerCAmelCase_ = 1_0_0 ) -> int:
    """Difference between the square of the sum and the sum of the squares of
    the first *lowerCAmelCase_* natural numbers (Project Euler 6)."""
    total = sum(range(1, lowerCAmelCase_ + 1))
    square_sum = sum(k * k for k in range(1, lowerCAmelCase_ + 1))
    return total * total - square_sum
if __name__ == "__main__":
    # The solver in this file is (mangled-)named ``__snake_case``; calling it
    # directly avoids the NameError the old ``solution()`` reference raised.
    print(F'{__snake_case() = }')
| 100
|
'''simple docstring'''
import os
def _snake_case ( ):
    """Largest product of four adjacent numbers — right, down, or either
    diagonal — in the 20x20 grid stored in ``grid.txt`` next to this file
    (Project Euler 11).

    Fixes the mangled original, which opened ``dirname`` of an undefined
    ``A_`` (should be ``__file__``) and converted an undefined name inside
    the row comprehension (should be the loop variable).
    """
    with open(os.path.dirname(__file__) + """/grid.txt""") as f:
        grid = []  # 20 rows of 20 ints
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left); j starts at 3 so j-3 stays in range
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum
if __name__ == "__main__":
    # The solver in this file is (mangled-)named ``_snake_case``; calling it
    # directly avoids the NameError the old ``solution()`` reference raised.
    print(_snake_case())
| 577
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

# The lazy-module plumbing below expects this canonical name; the mangled
# original referenced ``_import_structure`` without ever binding it and let
# the model lists overwrite the dict instead of being stored in it.
_import_structure = UpperCAmelCase__

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch present: expose the torch model classes lazily.
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow present: expose the TF model classes lazily.
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:
    class __lowerCAmelCase :
        """Minimal stand-in so this test module imports when PIL / the vision
        extras are unavailable (the image tests themselves are then skipped)."""

        @staticmethod
        def _lowerCamelCase ( *args , **kwargs ):
            """No-op placeholder (the mangled original named both the var-positional
            and var-keyword parameters ``A``, which is a SyntaxError)."""
            pass
@is_pipeline_test
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
    """Pipeline tests for zero-shot image classification.

    The mangled original collapsed every local onto one name and fed an
    undefined ``A`` to the pipeline; locals are restored.  The invalid
    ``self : List[str]`` style annotations (typing is not imported in this
    module) are dropped.  NOTE(review): all four methods still carry the same
    mangled name and shadow one another — restore the upstream ``test_*``
    names for them to be collected individually.
    """

    @require_torch
    def _lowerCamelCase ( self ):
        """Tiny random CLIP checkpoint on the PyTorch backend."""
        image_classifier = pipeline(
            model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        output = image_classifier(image , candidate_labels=['a', 'b', 'c'])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output) ,
            [
                [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
                [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
            ] , )

        batched = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
        # Five results of three (score, label) pairs each; label order is free.
        self.assertEqual(
            nested_simplify(batched) ,
            [
                [
                    {'score': 0.333, 'label': ANY(str)},
                    {'score': 0.333, 'label': ANY(str)},
                    {'score': 0.333, 'label': ANY(str)},
                ]
            ]
            * 5 , )

    @require_tf
    def _lowerCamelCase ( self ):
        """Tiny random CLIP checkpoint on the TensorFlow backend."""
        image_classifier = pipeline(
            model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        output = image_classifier(image , candidate_labels=['a', 'b', 'c'])
        self.assertEqual(
            nested_simplify(output) ,
            [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}] , )

        batched = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
        self.assertEqual(
            nested_simplify(batched) ,
            [
                [
                    {'score': 0.333, 'label': ANY(str)},
                    {'score': 0.333, 'label': ANY(str)},
                    {'score': 0.333, 'label': ANY(str)},
                ]
            ]
            * 5 , )

    @slow
    @require_torch
    def _lowerCamelCase ( self ):
        """Full openai/clip-vit-base-patch32 checkpoint on PyTorch."""
        image_classifier = pipeline(
            task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        output = image_classifier(image , candidate_labels=['cat', 'plane', 'remote'])
        self.assertEqual(
            nested_simplify(output) ,
            [
                {'score': 0.511, 'label': 'remote'},
                {'score': 0.485, 'label': 'cat'},
                {'score': 0.004, 'label': 'plane'},
            ] , )

        batched = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
        self.assertEqual(
            nested_simplify(batched) ,
            [
                [
                    {'score': 0.511, 'label': 'remote'},
                    {'score': 0.485, 'label': 'cat'},
                    {'score': 0.004, 'label': 'plane'},
                ],
            ]
            * 5 , )

    @slow
    @require_tf
    def _lowerCamelCase ( self ):
        """Full openai/clip-vit-base-patch32 checkpoint on TensorFlow."""
        image_classifier = pipeline(
            task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
        # This is an image of 2 cats with remotes and no planes
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        output = image_classifier(image , candidate_labels=['cat', 'plane', 'remote'])
        self.assertEqual(
            nested_simplify(output) ,
            [
                {'score': 0.511, 'label': 'remote'},
                {'score': 0.485, 'label': 'cat'},
                {'score': 0.004, 'label': 'plane'},
            ] , )

        batched = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
        self.assertEqual(
            nested_simplify(batched) ,
            [
                [
                    {'score': 0.511, 'label': 'remote'},
                    {'score': 0.485, 'label': 'cat'},
                    {'score': 0.004, 'label': 'plane'},
                ],
            ]
            * 5 , )
| 639
| 1
|
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
# Emit INFO-level progress messages while the checkpoint conversion runs.
logging.set_verbosity_info()
def __UpperCamelCase(params, i, prefix, layer_name="attention"):
    """Fetch the key/out/query/value kernels of one attention layer from a
    flattened T5X parameter dict.

    (Parameter names restored — the mangled original declared four parameters
    all named ``A``, which is a SyntaxError.)

    Args:
        params: flat mapping of ``/``-joined parameter paths to arrays.
        i: layer index.
        prefix: ``"encoder"`` or ``"decoder"``.
        layer_name: attention sub-module name within the layer.

    Returns:
        ``(k, o, q, v)`` kernel arrays.
    """
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def __UpperCamelCase(params, i, prefix, split_mlp_wi=False):
    """Fetch the MLP kernels of one layer from a flattened T5X parameter dict.

    (Parameter names restored — the mangled original declared four parameters
    all named ``A`` (a SyntaxError) and built the tuple from an undefined
    ``wi_a``.)

    Args:
        params: flat mapping of ``/``-joined parameter paths to arrays.
        i: layer index.
        prefix: ``"encoder"`` or ``"decoder"``.
        split_mlp_wi: True for v1.1-style gated GeLU with wi_0/wi_1.

    Returns:
        ``(wi, wo)`` where ``wi`` is a ``(wi_0, wi_1)`` tuple when split.
    """
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def __UpperCamelCase(params, i, prefix, layer_name):
    """Fetch one layer-norm scale vector from a flattened T5X parameter dict.

    (Parameter names restored — the mangled original declared four parameters
    all named ``A``, which is a SyntaxError.)
    """
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Convert the parameters from T5X-Flax layout to a Transformers-PyTorch layout.

    Args:
        variables: the loaded T5X checkpoint tree (expects a "target" subtree).
        num_layers: number of encoder (and decoder) blocks.
        is_encoder_only: when True, skip the decoder / LM head parameters.

    Returns:
        collections.OrderedDict mapping PyTorch state-dict key -> array.
    """
    old = traverse_util.flatten_dict(variables['''target'''] )
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print('''Split MLP:''' , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention). Kernels are transposed: flax stores
        # (in, out) while torch nn.Linear stores (out, in).
        layer_norm = tax_layer_norm_lookup(old , i , '''encoder''' , '''pre_attention_layer_norm''' )
        k, o, q, v = tax_attention_lookup(old , i , '''encoder''' , '''attention''' )
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old , i , '''encoder''' , '''pre_mlp_layer_norm''' )
        wi, wo = tax_mlp_lookup(old , i , '''encoder''' , split_mlp_wi )
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        """encoder/relpos_bias/rel_embedding"""
    ].T
    new["encoder.final_layer_norm.weight"] = old["""encoder/encoder_norm/scale"""]
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old , i , '''decoder''' , '''pre_self_attention_layer_norm''' )
            k, o, q, v = tax_attention_lookup(old , i , '''decoder''' , '''self_attention''' )
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old , i , '''decoder''' , '''pre_cross_attention_layer_norm''' )
            k, o, q, v = tax_attention_lookup(old , i , '''decoder''' , '''encoder_decoder_attention''' )
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old , i , '''decoder''' , '''pre_mlp_layer_norm''' )
            wi, wo = tax_mlp_lookup(old , i , '''decoder''' , split_mlp_wi )
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
        new["decoder.final_layer_norm.weight"] = old["""decoder/decoder_norm/scale"""]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            """decoder/relpos_bias/rel_embedding"""
        ].T
    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        new["lm_head.weight"] = old["""decoder/logits_dense/kernel"""].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepare a PyTorch state dict from the converted parameter mapping.

    Copies every array into a torch tensor, then aliases the shared embedding
    into the encoder/decoder embedding slots (and the LM head for old v1.0
    checkpoints) when those keys are missing.
    """
    # .copy() is required: torch.from_numpy needs a writable, contiguous array.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["""shared.weight"""]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["""shared.weight"""]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''' )
            state_dict["lm_head.weight"] = state_dict["""shared.weight"""]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Load a T5X checkpoint into the given PyTorch T5 model, in place.

    Args:
        model: the target ``T5EncoderModel`` / ``T5ForConditionalGeneration``.
        config: the matching T5 config (provides ``num_layers``).
        tax_checkpoint_path: path to the native T5X checkpoint directory.
        is_encoder_only: whether to skip decoder / LM-head weights.
    """
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    converted = convert_tax_to_pytorch(variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only )
    state_dict = make_state_dict(converted , is_encoder_only )
    # strict=True: every converted key must be consumed by the model.
    model.load_state_dict(state_dict , strict=True )
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Build a T5 model from ``config_file``, fill it from a T5X checkpoint and save it.

    Args:
        tax_checkpoint_path: path to the native T5X checkpoint.
        config_file: JSON config describing the model architecture.
        pytorch_dump_path: output directory for ``save_pretrained``.
        is_encoder_only: build an encoder-only model instead of encoder-decoder.
    """
    config = TaConfig.from_json_file(config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config )
    else:
        model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model , config , tax_checkpoint_path , is_encoder_only )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('''Done''' )
if __name__ == "__main__":
    # CLI entry point: parse the four conversion arguments and run the converter.
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
    # Required parameters
    parser.add_argument(
        '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
    )
    args = parser.parse_args()
    # Note: argparse exposes --t5x_checkpoint_path as args.t5x_checkpoint_path.
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
| 415
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of VQModel's encoding method.

    Args:
        latents (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            The encoded output sample from the last layer of the model.
    """

    latents: torch.FloatTensor
class VQModel(ModelMixin, ConfigMixin):
    r"""VQ-VAE model for encoding images into discrete latents and decoding latents back to images.

    The model is composed of an ``Encoder``, a ``VectorQuantizer`` codebook
    (with 1x1 conv projections into/out of the embedding space) and a
    ``Decoder``. ``norm_type="spatial"`` additionally conditions the decoder
    on the quantized latents.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18_215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder (double_z=False: VQ latents are not split
        # into mean/logvar like a KL autoencoder).
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        # 1x1 convs project latents into / out of the codebook embedding space.
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        """Encode images into (pre-quantization) latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        """Quantize latents (unless ``force_not_quantize``) and decode them to images."""
        if not force_not_quantize:
            # VectorQuantizer returns (quantized, loss, info); only the tensor is needed here.
            quant, _, _ = self.quantize(h)
        else:
            quant = h
        dec_input = self.post_quant_conv(quant)
        # Spatial norm decoders are additionally conditioned on the quantized latents.
        dec = self.decoder(dec_input, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        """Full autoencoding pass: encode, quantize, decode."""
        h = self.encode(sample).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 306
| 0
|
"""simple docstring"""
NUMBERS_PLUS_LETTER = 'Input must be a string of 8 numbers plus letter'
LOOKUP_LETTERS = 'TRWAGMYFPDXBNJZSQVHLCKE'


def UpperCamelCase_(spanish_id: str) -> bool:
    """Validate a Spanish national ID (DNI): 8 digits plus a check letter.

    Args:
        spanish_id: the ID, optionally with dashes (e.g. "12345678-Z").

    Returns:
        True when the trailing letter matches ``number % 23`` in the official
        lookup table, False otherwise.

    Raises:
        TypeError: if the input is not a string.
        ValueError: if the cleaned input is not exactly 8 digits + 1 letter.
    """
    if not isinstance(spanish_id, str):
        msg = f'''Expected string as input, found {type(spanish_id).__name__}'''
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace('-', '').upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 2_3]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 702
|
"""simple docstring"""
# Precomputed digit-square sums for every 5-digit chunk (0..99999).
DIGITS_SQUARED = [sum(int(c, 1_0) ** 2 for c in i.__str__()) for i in range(1_0_0_0_0_0)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the decimal digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
        number //= 1_0_0_0_0_0
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
# CHAINS[n - 1] is True when the chain of n ends at 1, False when it ends at 89.
CHAINS = [None] * 1_0_0_0_0_0_0_0
CHAINS[0] = True  # 1 -> 1
CHAINS[57] = False  # 58 -> ... -> 89


def chain(number: int) -> bool:
    """Return True when ``number``'s digit-square chain ends at 1 (memoized in CHAINS)."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # n, n*10, n*100, ... all share the same chain result; memoize them too.
    while number < 1_0_0_0_0_0_0_0:
        CHAINS[number - 1] = number_chain
        number *= 1_0
    return number_chain


def solution(number: int = 1_0_0_0_0_0_0_0) -> int:
    """Project Euler 92: count starting numbers up to ``number`` whose chain reaches 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f'{solution() = }')
| 406
| 0
|
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
# Module-level fixtures: make the translation example importable, fix the seed,
# and pin the two small checkpoints used across the tests.
# NOTE(review): obfuscation renamed the original `bindir` binding to
# `__UpperCamelCase`, so the f-string below references a now-undefined name.
__UpperCamelCase : Optional[Any] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
    from run_translation import main # noqa
set_seed(42)
__UpperCamelCase : List[str] = """sshleifer/student_marian_en_ro_6_1"""
__UpperCamelCase : int = """sshleifer/tiny-mbart"""
@require_torch
class __SCREAMING_SNAKE_CASE( a_ ):
    """End-to-end tests for the PyTorch translation example (run_translation.py) via the Trainer.

    NOTE(review): an obfuscation pass collapsed distinct parameter/local names
    into repeated `UpperCamelCase` / `snake_case__` bindings, so several later
    references (e.g. `do_eval`, `logs`, `eval_metrics`) are dangling. The code
    is left byte-identical; only comments/docstrings are added.
    """

    # Run a short (1-epoch) training job and sanity-check the logged eval metrics.
    def lowerCAmelCase_ ( self: int , UpperCamelCase: Any=False , UpperCamelCase: Optional[Any]=None , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: int=True , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: List[Any]=True , ) -> Tuple:
        snake_case__ = self.run_trainer(
            eval_steps=1 , max_len=12 , model_name=UpperCamelCase , num_train_epochs=1 , distributed=UpperCamelCase , extra_args_str=UpperCamelCase , predict_with_generate=UpperCamelCase , do_train=UpperCamelCase , do_eval=UpperCamelCase , do_predict=UpperCamelCase , )
        snake_case__ = TrainerState.load_from_json(os.path.join(UpperCamelCase , 'trainer_state.json' ) ).log_history
        if not do_eval:
            return
        snake_case__ = [log for log in logs if 'eval_loss' in log.keys()]
        snake_case__ = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            snake_case__ = eval_metrics[-1]
            assert isinstance(last_step_stats['eval_bleu'] , UpperCamelCase )
            assert not math.isnan(float(last_step_stats['eval_loss'] ) ), "eval_loss must not be `nan`"
    # Single-GPU / multi-GPU variants of the quick run.
    @require_torch_non_multi_gpu
    def lowerCAmelCase_ ( self: Optional[int] ) -> Dict:
        self.run_seqaseq_quick()
    @require_torch_multi_gpu
    def lowerCAmelCase_ ( self: Any ) -> int:
        self.run_seqaseq_quick(distributed=UpperCamelCase )
    @require_torch_multi_gpu
    def lowerCAmelCase_ ( self: Tuple ) -> int:
        self.run_seqaseq_quick(distributed=UpperCamelCase )
    # fairscale sharded-DDP variants (currently skipped pending env update).
    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]:
        self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str='--sharded_ddp simple' )
    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def lowerCAmelCase_ ( self: Any ) -> Any:
        self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str='--sharded_ddp simple --fp16' )
    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def lowerCAmelCase_ ( self: int ) -> Tuple:
        self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str='--sharded_ddp zero_dp_2' , predict_with_generate=UpperCamelCase )
    @unittest.skip('Requires an update of the env running those tests' )
    @require_torch_multi_gpu
    @require_fairscale
    def lowerCAmelCase_ ( self: Dict ) -> Tuple:
        self.run_seqaseq_quick(
            distributed=UpperCamelCase , extra_args_str='--sharded_ddp zero_dp_2 --fp16' , predict_with_generate=UpperCamelCase )
    @require_apex
    @require_torch_gpu
    def lowerCAmelCase_ ( self: Tuple ) -> Any:
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str='--fp16 --fp16_backend=apex' )
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str='--fp16 --fp16_backend=apex' )
    # Verify --log_level / --log_level_replica produce the expected number of
    # "Running training" lines on stderr for each configuration.
    @parameterized.expand(['base', 'low', 'high', 'mixed'] )
    @require_torch_multi_gpu
    def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: List[str] ) -> str:
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        snake_case__ = {
            # test with the default log_level - should be info and thus log info once
            'base': {'extra_args_str': '', 'n_matches': 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            'low': {'extra_args_str': '--log_level debug --log_level_replica debug', 'n_matches': 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            'high': {'extra_args_str': '--log_level error --log_level_replica debug', 'n_matches': 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            'mixed': {'extra_args_str': '--log_level error --log_level_replica error', 'n_matches': 0},
        }
        snake_case__ = experiments[experiment_id]
        snake_case__ = {'distributed': True, 'predict_with_generate': False, 'do_eval': False, 'do_predict': False}
        snake_case__ = 'Running training'
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**UpperCamelCase , extra_args_str=data['extra_args_str'] )
        snake_case__ = len(re.findall(UpperCamelCase , cl.err ) )
        self.assertEqual(UpperCamelCase , data['n_matches'] )
    # Slow: 10-epoch run; loss must improve and predict artifacts must be saved.
    @slow
    def lowerCAmelCase_ ( self: Union[str, Any] ) -> Any:
        snake_case__ = self.run_trainer(
            eval_steps=2 , max_len=1_28 , model_name=UpperCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=UpperCamelCase , )
        # Check metrics
        snake_case__ = TrainerState.load_from_json(os.path.join(UpperCamelCase , 'trainer_state.json' ) ).log_history
        snake_case__ = [log for log in logs if 'eval_loss' in log.keys()]
        snake_case__ = eval_metrics[0]
        snake_case__ = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats['eval_bleu'] , UpperCamelCase )
        # test if do_predict saves generations and metrics
        snake_case__ = os.listdir(UpperCamelCase )
        snake_case__ = {os.path.basename(UpperCamelCase ) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    # Slow: compare GPU memory usage and loss of AdamW vs. 8-bit BNB AdamW.
    @slow
    @require_bitsandbytes
    def lowerCAmelCase_ ( self: int ) -> int:
        from transformers.training_args import OptimizerNames
        def train_and_return_metrics(UpperCamelCase: str ) -> Tuple[int, float]:
            snake_case__ = '--skip_memory_metrics 0'
            snake_case__ = self.run_trainer(
                max_len=1_28 , model_name=UpperCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=UpperCamelCase , distributed=UpperCamelCase , extra_args_str=UpperCamelCase , do_eval=UpperCamelCase , do_predict=UpperCamelCase , n_gpus_to_use=1 , )
            # Check metrics
            snake_case__ = TrainerState.load_from_json(Path(UpperCamelCase , 'trainer_state.json' ) ).log_history
            snake_case__ = int(logs[0]['train_mem_gpu_peaked_delta'] / 2**20 )
            snake_case__ = int(logs[0]['train_mem_gpu_alloc_delta'] / 2**20 )
            snake_case__ = logs[0]['train_loss']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        snake_case__ , snake_case__ , snake_case__ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        snake_case__ , snake_case__ , snake_case__ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        snake_case__ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        snake_case__ = gpu_peak_mem_orig + gpu_alloc_mem_orig
        snake_case__ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        snake_case__ = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb 25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        snake_case__ = 1_20
        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            UpperCamelCase , UpperCamelCase , 'should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'
            F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
            F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
        self.assertGreater(
            UpperCamelCase , UpperCamelCase , 'should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'
            F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
            F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
        self.assertEqual(
            UpperCamelCase , UpperCamelCase , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
    # Build the full CLI for run_translation.py and execute it, either as a
    # torch.distributed subprocess or in-process via patched sys.argv.
    def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: int , UpperCamelCase: str , UpperCamelCase: int , UpperCamelCase: float = 3e-3 , UpperCamelCase: str = "adafactor" , UpperCamelCase: bool = False , UpperCamelCase: str = None , UpperCamelCase: int = 0 , UpperCamelCase: bool = True , UpperCamelCase: bool = True , UpperCamelCase: bool = True , UpperCamelCase: bool = True , UpperCamelCase: int = None , ) -> Union[str, Any]:
        snake_case__ = self.test_file_dir / '../fixtures/tests_samples/wmt_en_ro'
        snake_case__ = self.get_auto_remove_tmp_dir()
        # NOTE(review): "73,502" below looks like a garbled substitution of the
        # original "{max_len}" placeholder -- TODO confirm against upstream.
        snake_case__ = F'''
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length 73,502
            --max_target_length 73,502
            --do_train
            --num_train_epochs {str(UpperCamelCase )}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(UpperCamelCase )}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        '''.split()
        snake_case__ = F'''
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length 73,502
            --evaluation_strategy steps
            --eval_steps {str(UpperCamelCase )}
        '''.split()
        snake_case__ = '\n --do_predict\n '.split()
        snake_case__ = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += F'''--optim {optim}'''.split()
        if extra_args_str is not None:
            args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                snake_case__ = get_gpu_count()
            snake_case__ = get_torch_dist_unique_port()
            snake_case__ = F'''
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            '''.split()
            snake_case__ = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(UpperCamelCase , env=self.get_env() )
        else:
            snake_case__ = ['run_translation.py'] + args
            with patch.object(UpperCamelCase , 'argv' , UpperCamelCase ):
                main()
        return output_dir
| 328
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class __SCREAMING_SNAKE_CASE( unittest.TestCase ):
    """Configuration holder for the ConditionalDetr image-processor tests.

    Stores the processor kwargs and computes the expected resized
    height/width for a given input, mirroring the processor's shortest-edge
    resize rule.

    NOTE(review): obfuscation collapsed the parameter names to `UpperCamelCase`
    and assignment targets to `snake_case__`, so the attribute writes below no
    longer bind the names later read (e.g. `self.do_resize`). Code kept
    byte-identical; only comments added.
    """
    def __init__( self: int , UpperCamelCase: List[Any] , UpperCamelCase: Optional[int]=7 , UpperCamelCase: List[Any]=3 , UpperCamelCase: List[Any]=30 , UpperCamelCase: List[Any]=4_00 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Any=None , UpperCamelCase: Tuple=True , UpperCamelCase: List[str]=[0.5, 0.5, 0.5] , UpperCamelCase: Dict=[0.5, 0.5, 0.5] , UpperCamelCase: Tuple=True , UpperCamelCase: List[str]=1 / 2_55 , UpperCamelCase: Union[str, Any]=True , ) -> List[str]:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        snake_case__ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = num_channels
        snake_case__ = min_resolution
        snake_case__ = max_resolution
        snake_case__ = do_resize
        snake_case__ = size
        snake_case__ = do_normalize
        snake_case__ = image_mean
        snake_case__ = image_std
        snake_case__ = do_rescale
        snake_case__ = rescale_factor
        snake_case__ = do_pad
    # Export the stored settings as the processor's constructor kwargs.
    def lowerCAmelCase_ ( self: Dict ) -> str:
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    # Compute the (height, width) the processor is expected to produce;
    # for batched=True, returns the per-batch maxima.
    def lowerCAmelCase_ ( self: Any , UpperCamelCase: List[Any] , UpperCamelCase: Optional[int]=False ) -> int:
        if not batched:
            snake_case__ = image_inputs[0]
            if isinstance(UpperCamelCase , Image.Image ):
                snake_case__ , snake_case__ = image.size
            else:
                snake_case__ , snake_case__ = image.shape[1], image.shape[2]
            # Shortest-edge resize: scale so the shorter side equals size["shortest_edge"].
            if w < h:
                snake_case__ = int(self.size['shortest_edge'] * h / w )
                snake_case__ = self.size['shortest_edge']
            elif w > h:
                snake_case__ = self.size['shortest_edge']
                snake_case__ = int(self.size['shortest_edge'] * w / h )
            else:
                snake_case__ = self.size['shortest_edge']
                snake_case__ = self.size['shortest_edge']
        else:
            snake_case__ = []
            for image in image_inputs:
                snake_case__ , snake_case__ = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            snake_case__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
            snake_case__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE( a_ , unittest.TestCase ):
_UpperCAmelCase = ConditionalDetrImageProcessor if is_vision_available() else None
    # Test setup: build the settings tester used by all the cases below.
    # NOTE(review): `ConditionalDetrImageProcessingTester` is the obfuscated
    # tester class defined earlier in this file under a different name.
    def lowerCAmelCase_ ( self: Dict ) -> Union[str, Any]:
        snake_case__ = ConditionalDetrImageProcessingTester(self )
    # Convenience accessor for the processor constructor kwargs.
    @property
    def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[int]:
        return self.image_processor_tester.prepare_image_processor_dict()
    # The processor must expose all the standard configuration attributes.
    def lowerCAmelCase_ ( self: Dict ) -> Union[str, Any]:
        snake_case__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCamelCase , 'image_mean' ) )
        self.assertTrue(hasattr(UpperCamelCase , 'image_std' ) )
        self.assertTrue(hasattr(UpperCamelCase , 'do_normalize' ) )
        self.assertTrue(hasattr(UpperCamelCase , 'do_resize' ) )
        self.assertTrue(hasattr(UpperCamelCase , 'size' ) )
    # from_dict must honor both the stored size dict and size/max_size overrides.
    def lowerCAmelCase_ ( self: int ) -> Optional[int]:
        snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
        self.assertEqual(image_processor.do_pad , UpperCamelCase )
        snake_case__ = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
        self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
        self.assertEqual(image_processor.do_pad , UpperCamelCase )
    # Intentionally empty: overrides a mixin test that does not apply here.
    def lowerCAmelCase_ ( self: List[str] ) -> List[Any]:
        pass
    # PIL inputs: check output pixel_values shapes for single and batched calls.
    def lowerCAmelCase_ ( self: Optional[int] ) -> Tuple:
        # Initialize image_processing
        snake_case__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase , Image.Image )
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
        snake_case__ = image_processing(UpperCamelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # Numpy inputs: same shape checks as the PIL case.
    def lowerCAmelCase_ ( self: Tuple ) -> List[str]:
        # Initialize image_processing
        snake_case__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase , np.ndarray )
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case__ = image_processing(UpperCamelCase , return_tensors='pt' ).pixel_values
        snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # Torch-tensor inputs: same shape checks as the PIL/numpy cases.
    def lowerCAmelCase_ ( self: List[str] ) -> Union[str, Any]:
        # Initialize image_processing
        snake_case__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase , torch.Tensor )
        # Test not batched input
        snake_case__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case__ = image_processing(UpperCamelCase , return_tensors='pt' ).pixel_values
        snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    # Slow: encode the COCO detection fixture with the pretrained processor and
    # check every output (pixel values, area, boxes, ids, iscrowd, labels, sizes)
    # against fixed expected tensors.
    @slow
    def lowerCAmelCase_ ( self: str ) -> Any:
        # prepare image and target
        snake_case__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
            snake_case__ = json.loads(f.read() )
        snake_case__ = {'image_id': 3_97_69, 'annotations': target}
        # encode them
        snake_case__ = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
        snake_case__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors='pt' )
        # verify pixel values
        snake_case__ = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding['pixel_values'].shape , UpperCamelCase )
        snake_case__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
        # verify area
        snake_case__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCamelCase ) )
        # verify boxes
        snake_case__ = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCamelCase )
        snake_case__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCamelCase , atol=1e-3 ) )
        # verify image_id
        snake_case__ = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCamelCase ) )
        # verify is_crowd
        snake_case__ = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCamelCase ) )
        # verify class_labels
        snake_case__ = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCamelCase ) )
        # verify orig_size
        snake_case__ = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCamelCase ) )
        # verify size
        snake_case__ = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCamelCase ) )
@slow
def lowerCAmelCase_ ( self: List[Any] ) -> Dict:
# prepare image, target and masks_path
snake_case__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
snake_case__ = json.loads(f.read() )
snake_case__ = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
snake_case__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
snake_case__ = ConditionalDetrImageProcessor(format='coco_panoptic' )
snake_case__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors='pt' )
# verify pixel values
snake_case__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , UpperCamelCase )
snake_case__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
snake_case__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCamelCase ) )
# verify boxes
snake_case__ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCamelCase )
snake_case__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
snake_case__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCamelCase ) )
# verify is_crowd
snake_case__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCamelCase ) )
# verify class_labels
snake_case__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCamelCase ) )
# verify masks
snake_case__ = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , UpperCamelCase )
# verify orig_size
snake_case__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCamelCase ) )
# verify size
snake_case__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCamelCase ) )
| 328
| 1
|
"""simple docstring"""
from __future__ import annotations
def lowercase(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search.

    :param graph: adjacency mapping, vertex -> list of neighbours
    :param start: vertex to start the traversal from
    :return: set of every vertex reachable from ``start``

    NOTE(review): the original had both parameters named identically (a
    SyntaxError) and seeded ``explored`` with ``set(start)``, which is only
    correct for single-character vertex labels; ``{start}`` generalizes it.
    """
    explored, stack = {start}, [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
# Example undirected graph used by the demo below.
a = {
    'A': ['B', 'C', 'D'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B', 'D'],
    'E': ['B', 'F'],
    'F': ['C', 'E', 'G'],
    'G': ['F'],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The obfuscated original called the undefined names `depth_first_search`
    # and `G`; the DFS function in this file is `lowercase`, the graph is `a`.
    print(lowercase(a, 'A'))
| 529
|
"""simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance,
):
    """Relax all edges leaving ``v`` for one direction of bidirectional Dijkstra.

    Updates ``cst_fwd``/``parent``/``queue`` in place and returns the (possibly
    improved) ``shortest_distance`` when the two search frontiers meet.

    NOTE(review): renamed from the obfuscated ``lowercase`` — the driver below
    calls ``pass_and_relaxation``, and the old binding was dead anyway (shadowed
    by the second ``def lowercase``). The original signature also repeated one
    parameter name, a SyntaxError.
    """
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        # Frontier meeting point: path fwd->v->nxt joined with backward cost.
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def lowercase(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """Bidirectional Dijkstra shortest-path distance from ``source`` to
    ``destination``; returns -1 when the destination is unreachable.

    NOTE(review): the original signature repeated one parameter name (a
    SyntaxError); names reconstructed from the body's usage.
    """
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        # forward relaxation step
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        # backward relaxation step (forward/backward roles swapped)
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        # Stopping criterion: frontiers have crossed the best meeting point.
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Forward adjacency list: vertex -> [[neighbour, edge_cost], ...].
graph_fwd = {
    'B': [['C', 1]],
    'C': [['D', 1]],
    'D': [['F', 1]],
    'E': [['B', 1], ['G', 2]],
    'F': [],
    'G': [['F', 1]],
}
# Reversed-edge adjacency list used by the backward search.
graph_bwd = {
    'B': [['E', 1]],
    'C': [['B', 1]],
    'D': [['C', 1]],
    'F': [['D', 1], ['G', 1]],
    'E': [[None, np.inf]],
    'G': [['E', 2]],
}
# Backward-compat: the obfuscated original bound both graphs to the single
# name ``a`` (the second assignment shadowed the first), so ``a`` resolved
# to the backward graph.
a = graph_bwd

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 529
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the Longformer model family: names are declared
# here and materialized on first attribute access via _LazyModule.
# NOTE(review): the obfuscated original rebound a single variable instead of
# filling ``_import_structure`` and then passed the undefined name
# ``_import_structure`` to _LazyModule (NameError); reconstructed below.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 564
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for I-BERT.
# NOTE(review): the obfuscated original rebound one variable instead of
# filling ``_import_structure`` and passed the undefined ``_import_structure``
# to _LazyModule (NameError); reconstructed below.
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so torch loads on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 564
| 1
|
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger. NOTE(review): in the obfuscated original the next assignment
# rebound the same name and clobbered the logger.
logger = logging.get_logger(__name__)

# Maps checkpoint identifiers to their hosted configuration files.
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    """Configuration class for SegFormer models (``model_type="segformer"``).

    Stores encoder hyper-parameters (per-stage depths, strides, hidden sizes,
    attention heads, ...) and decoder/segmentation-head settings.

    NOTE(review): reconstructed from the obfuscated original, whose
    ``__init__`` repeated a single parameter name (a SyntaxError) and whose
    base class name was undefined; ``PretrainedConfig`` is the imported base.
    """

    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""",
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        # Deprecated switch; defaults to True when not supplied.
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for SegFormer.

    NOTE(review): the obfuscated original gave all three properties the same
    name (so two were shadowed) and used an undefined base class; the imported
    ``OnnxConfig`` is the base and the property names follow its interface.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single pixel_values input with dynamic batch/channel/spatial axes.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 704
|
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact GELU via the error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh-approximated GELU."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    """Fast GELU approximation."""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    """Sigmoid-based GELU approximation."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_aa(x):
    """GELU clipped to [-10, 10] ("gelu_10")."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split *x* in two along *axis*, gate one half."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


# NOTE(review): in the obfuscated original every function above shared one
# name (each def shadowed the previous) and the mapping below referenced
# undefined names; function names reconstructed to match the mapping values.
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    """Look up an activation function by name; raise KeyError if unknown."""
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}')
| 21
| 0
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
# NOTE(review): the obfuscated original bound all three documentation
# constants to a single name; the metric class below references
# _CITATION / _DESCRIPTION / _KWARGS_DESCRIPTION, reconstructed here.
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCamelCase__(datasets.Metric):
    """TER (Translation Edit Rate) metric backed by sacrebleu.

    NOTE(review): the obfuscated original gave both methods the same name (so
    `datasets.Metric`'s `_info`/`_compute` hooks never resolved) and repeated
    one parameter name in the compute signature (a SyntaxError); both
    reconstructed below.
    """

    def _info(self):
        # Guard against sacrebleu versions that predate the TER API.
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized=False,
        ignore_punct=False,
        support_zh_ja_chars=False,
        case_sensitive=False,
    ):
        # sacrebleu expects references transposed: one list per reference set.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 306
|
'''simple docstring'''
class SubArray:
    """Maximum sub-array sum (Kadane-style DP) over a comma-separated string.

    NOTE(review): renamed from the obfuscated ``_UpperCamelCase`` — the
    ``__main__`` block below instantiates ``SubArray``.
    """

    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(',')

    def solve_sub_array(self):
        """Return the maximum contiguous sub-array sum."""
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            # Best sum of a sub-array ending at i: extend or restart here.
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            # Best sum seen over any prefix up to i.
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(('the results is:', re))
| 394
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for MaskFormer.
# NOTE(review): the obfuscated original rebound one variable instead of
# filling ``_import_structure`` and passed the undefined ``_import_structure``
# to _LazyModule (NameError); reconstructed below.
_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 709
|
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
# NOTE(review): the obfuscated original bound all three constants to the one
# name ``A_``; L4884 itself references ``MODEL_CONFIG_CLASSES`` and later code
# references ``logger``, which fixes the intended names.
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer to fine-tune or
    train from scratch.

    NOTE(review): field names reconstructed from the help texts — the
    obfuscated original repeated one field name and used an undefined default
    sentinel; the ``__main__`` entry point references ``ModelArguments``.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        # config_overrides only makes sense when training from scratch.
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for
    training and evaluation.

    NOTE(review): field names reconstructed from the help texts — the
    obfuscated original repeated one field name and used an undefined default
    sentinel; the ``__main__`` entry point references ``DataTrainingArguments``.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        # Only csv/json/txt inputs are supported by the downstream loaders.
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def UpperCamelCase__(dataset, ref_file):
    """Attach Chinese whole-word-masking reference data to *dataset*.

    Reads one JSON object per non-blank line of *ref_file*, requires one
    reference per dataset row, and returns a new ``Dataset`` with a
    ``chinese_ref`` column added.

    NOTE(review): the original signature repeated one parameter name (a
    SyntaxError); parameter names reconstructed from the body's usage.
    """
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def UpperCamelCase__ ( ) -> List[str]:
    """Whole-word-masking MLM fine-tuning entry point.

    Parses model/data/training arguments, loads and tokenizes the datasets,
    optionally attaches Chinese whole-word reference files, then trains and/or
    evaluates with the HF `Trainer` and returns the metrics dict.

    NOTE(review): the obfuscation collapsed every distinct local variable into
    `snake_case__`, so the readable names used below (`parser`, `model_args`,
    `data_args`, `training_args`, `last_checkpoint`, `datasets`, `extension`,
    `config`, `tokenizer`, `model`, `column_names`, `tokenized_datasets`,
    `has_ref`, `trainer`, `train_result`, `results`, `eval_output`, and the
    `__magic_name__` placeholders) are all unbound. Restore them from the
    upstream `run_mlm_wwm.py` before running this file.
    """
    snake_case__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        snake_case__ , snake_case__ , snake_case__ : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        snake_case__ , snake_case__ , snake_case__ : str = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    snake_case__ : Dict = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        snake_case__ : str = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""" , __magic_name__ )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        snake_case__ : Optional[Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            snake_case__ : Union[str, Any] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=f"train[:{data_args.validation_split_percentage}%]" , )
            snake_case__ : Optional[int] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=f"train[{data_args.validation_split_percentage}%:]" , )
    else:
        snake_case__ : Optional[int] = {}
        if data_args.train_file is not None:
            snake_case__ : Tuple = data_args.train_file
        if data_args.validation_file is not None:
            snake_case__ : Optional[int] = data_args.validation_file
        snake_case__ : str = data_args.train_file.split(""".""" )[-1]
        if extension == "txt":
            snake_case__ : Dict = """text"""
        snake_case__ : Union[str, Any] = load_dataset(__magic_name__ , data_files=__magic_name__ )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    snake_case__ : Optional[int] = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        snake_case__ : Any = AutoConfig.from_pretrained(model_args.config_name , **__magic_name__ )
    elif model_args.model_name_or_path:
        snake_case__ : Optional[int] = AutoConfig.from_pretrained(model_args.model_name_or_path , **__magic_name__ )
    else:
        snake_case__ : Tuple = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}" )
            config.update_from_string(model_args.config_overrides )
            logger.info(f"New config: {config}" )
    snake_case__ : Union[str, Any] = {
        """cache_dir""": model_args.cache_dir,
        """use_fast""": model_args.use_fast_tokenizer,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__magic_name__ )
    elif model_args.model_name_or_path:
        snake_case__ : int = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__magic_name__ )
    else:
        raise ValueError(
            """You are instantiating a new tokenizer from scratch. This is not supported by this script."""
            """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
    if model_args.model_name_or_path:
        snake_case__ : Optional[Any] = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__magic_name__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        snake_case__ : int = AutoModelForMaskedLM.from_config(__magic_name__ )
    model.resize_token_embeddings(len(__magic_name__ ) )
    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        snake_case__ : Dict = datasets["""train"""].column_names
    else:
        snake_case__ : Optional[Any] = datasets["""validation"""].column_names
    snake_case__ : int = """text""" if """text""" in column_names else column_names[0]
    snake_case__ : str = """max_length""" if data_args.pad_to_max_length else False
    def tokenize_function(__magic_name__ : Optional[Any] ):
        # Remove empty lines
        snake_case__ : Union[str, Any] = [line for line in examples["""text"""] if len(__magic_name__ ) > 0 and not line.isspace()]
        return tokenizer(examples["""text"""] , padding=__magic_name__ , truncation=__magic_name__ , max_length=data_args.max_seq_length )
    snake_case__ : Union[str, Any] = datasets.map(
        __magic_name__ , batched=__magic_name__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        snake_case__ : Optional[int] = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        snake_case__ : Tuple = add_chinese_references(
            tokenized_datasets["""validation"""] , data_args.validation_ref_file )
    # If we have ref files, need to avoid it removed by trainer
    snake_case__ : int = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        snake_case__ : Optional[int] = False
    # Data collator
    # This one will take care of randomly masking the tokens.
    snake_case__ : Union[str, Any] = DataCollatorForWholeWordMask(tokenizer=__magic_name__ , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    snake_case__ : Tuple = Trainer(
        model=__magic_name__ , args=__magic_name__ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=__magic_name__ , data_collator=__magic_name__ , )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            snake_case__ : Tuple = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            snake_case__ : Union[str, Any] = model_args.model_name_or_path
        else:
            snake_case__ : Dict = None
        snake_case__ : List[str] = trainer.train(resume_from_checkpoint=__magic_name__ )
        trainer.save_model() # Saves the tokenizer too for easy upload
        snake_case__ : Any = os.path.join(training_args.output_dir , """train_results.txt""" )
        if trainer.is_world_process_zero():
            with open(__magic_name__ , """w""" ) as writer:
                logger.info("""***** Train results *****""" )
                for key, value in sorted(train_result.metrics.items() ):
                    logger.info(f" {key} = {value}" )
                    writer.write(f"{key} = {value}\n" )
        # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
        trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
    # Evaluation
    snake_case__ : int = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        snake_case__ : int = trainer.evaluate()
        snake_case__ : int = math.exp(eval_output["""eval_loss"""] )
        snake_case__ : Union[str, Any] = perplexity
        snake_case__ : Dict = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
        if trainer.is_world_process_zero():
            with open(__magic_name__ , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in sorted(results.items() ):
                    logger.info(f" {key} = {value}" )
                    writer.write(f"{key} = {value}\n" )
    return results
def UpperCamelCase__ ( __magic_name__ : Dict ) -> List[Any]:
    """xla_spawn (TPU) entry point; the spawner passes a process index, which is unused."""
    # NOTE(review): `main` is undefined in this dump — the training entry point
    # above was also renamed to `UpperCamelCase__` by the obfuscation.
    main()
# NOTE(review): `main` is undefined here — the training entry point above was
# renamed to `UpperCamelCase__` by the obfuscation.
if __name__ == "__main__":
    main()
| 419
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] ):
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Dict ):
    """Check that gather() over all processes yields 1..num_processes**2 in order."""
    state = UpperCamelCase__  # fix: the body referenced `state`, which was undefined
    # NOTE(review): `create_tensor` was also renamed to `SCREAMING_SNAKE_CASE__`
    # by the obfuscation; restore the original helper names file-wide.
    tensor = create_tensor(state )
    gathered_tensor = gather(tensor )  # fix: the dump gathered the state object and discarded the result
    assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple ):
    """Check that gather_object() collects every process's index, in rank order."""
    state = UpperCamelCase__  # fix: the body referenced `state`, which was undefined
    obj = [state.process_index]
    gathered_obj = gather_object(obj )  # fix: the dump passed the state object and discarded the result
    assert len(gathered_obj ) == state.num_processes, f'''{gathered_obj}, {len(gathered_obj )} != {state.num_processes}'''
    assert gathered_obj == list(range(state.num_processes ) ), f'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
    """Check that broadcast() gives every process rank 0's tensor (1..n)."""
    state = UpperCamelCase__  # fix: the body referenced `state`, which was undefined
    tensor = create_tensor(state )
    broadcasted_tensor = broadcast(tensor )  # fix: the dump broadcast the state object and discarded the result
    assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
    assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
    """Check pad_across_processes(): shorter tensors are right-padded with zeros."""
    state = UpperCamelCase__  # fix: the body referenced `state`, which was undefined
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1 ).to(state.device )
    else:
        tensor = torch.arange(state.num_processes ).to(state.device )
    padded_tensor = pad_across_processes(tensor )  # fix: the dump padded the state object
    assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[int] ):
    """Check reduce(..., "sum") across exactly two processes: [1,2]+[3,4] == [4,6]."""
    state = UpperCamelCase__  # fix: the body referenced `state`, which was undefined
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , """sum""" )  # fix: the dump reduced the state object
    truth_tensor = torch.tensor([4.0, 6] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), f'''{reduced_tensor} != {truth_tensor}'''
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
    """Check reduce(..., "mean") across exactly two processes: mean([1,2],[3,4]) == [2,3]."""
    state = UpperCamelCase__  # fix: the body referenced `state`, which was undefined
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state )
    reduced_tensor = reduce(tensor , """mean""" )  # fix: the dump reduced the state object
    truth_tensor = torch.tensor([2.0, 3] ).to(state.device )
    assert torch.allclose(reduced_tensor , truth_tensor ), f'''{reduced_tensor} != {truth_tensor}'''
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Optional[int] ):
    # For xla_spawn (TPUs)
    # NOTE(review): `main` is undefined in this dump — every top-level function
    # here (including the suite entry point below) was renamed to
    # `SCREAMING_SNAKE_CASE__`; restore the original names before running.
    main()
def SCREAMING_SNAKE_CASE__ ( ):
    """Run every distributed-ops smoke test on the current process.

    NOTE(review): the dump renamed all test helpers (and this function) to
    `SCREAMING_SNAKE_CASE__`, so the helper names called below are unresolved
    until the original names are restored file-wide.
    """
    state = PartialState()  # fix: the dump bound this to a throwaway name while using `state` below
    state.print(f'''State: {state}''' )
    state.print("""testing gather""" )
    test_gather(state )  # fix: the dump passed an undefined name to every helper
    state.print("""testing gather_object""" )
    test_gather_object(state )
    state.print("""testing broadcast""" )
    test_broadcast(state )
    state.print("""testing pad_across_processes""" )
    test_pad_across_processes(state )
    state.print("""testing reduce_sum""" )
    test_reduce_sum(state )
    state.print("""testing reduce_mean""" )
    test_reduce_mean(state )
# NOTE(review): `main` is undefined here — the suite entry point above was
# renamed to `SCREAMING_SNAKE_CASE__` by the obfuscation.
if __name__ == "__main__":
    main()
| 6
|
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging

_lowerCamelCase = logging.get_logger(__name__)
# The class below logs through the conventional module-level name `logger`,
# which the obfuscated dump left undefined; alias it here.
logger = _lowerCamelCase
class UpperCamelCase_ ( PretrainedConfig ):
    """Configuration for a composite encoder-decoder model.

    Holds one nested `PretrainedConfig` per sub-model under `encoder` and
    `decoder`, instantiated via `AutoConfig.for_model`.

    NOTE(review): restored from the upstream `EncoderDecoderConfig` — the dump
    inherited from a function, clobbered `model_type`/`is_composition` and both
    method names into one identifier each, and left every local
    (`encoder_config`, `decoder_config`, ...) unbound.
    """

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("""encoder""" )
        encoder_model_type = encoder_config.pop("""model_type""" )
        decoder_config = kwargs.pop("""decoder""" )
        decoder_model_type = decoder_config.pop("""model_type""" )

        # Imported lazily to avoid a circular import with the auto-config module.
        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate from two sub-configs, forcing the decoder/cross-attention flags."""
        logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["""encoder"""] = self.encoder.to_dict()
        output["""decoder"""] = self.decoder.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 6
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

# Module logger; the tokenizer's save_vocabulary() logs through `logger`.
logger = logging.get_logger(__name__)

# SentencePiece's word-start marker, stripped out when detokenizing.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}

# Backward-compatible alias: the obfuscated dump bound each constant above to
# this single name in turn, so its final value was the sizes dict. The proper
# names are restored so the tokenizer class below can resolve them.
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class a ( PreTrainedTokenizer ):
    """BARTpho tokenizer (syllable level): SentencePiece segmentation combined
    with a reduced fairseq vocabulary loaded from `monolingual_vocab_file`.

    NOTE(review): the obfuscated dump was uncompilable (every __init__
    parameter shared one name, every public method was named
    `__magic_name__`, and the base class/constants were clobbered). Parameter
    and method names are restored from the upstream implementation so the
    `PreTrainedTokenizer` machinery can find them.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. it absorbs the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, '''r''', encoding='''utf-8''') as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # The SentencePiece processor is a C++ object that cannot be pickled;
        # serialize its model proto instead and rebuild it in __setstate__.
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Single sequence: `<s> A </s>`; pair: `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # BARTpho does not use token type ids; the mask is all zeros.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Map a token to its reduced-vocab id, falling back to <unk>."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        # fix: the dump passed the `tokens` parameter itself to str.replace;
        # the intended argument is the SentencePiece word-start marker.
        out_string = ''''''.join(tokens).replace('''▁''', ''' ''').strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the SentencePiece model and the monolingual dict into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        out_monolingual_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
            out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file ):
            copyfile(self.monolingual_vocab_file , out_monolingual_vocab_file )
        elif not os.path.isfile(self.monolingual_vocab_file ):
            with open(out_monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F"""{str(token )} \n""" )
        return out_vocab_file, out_monolingual_vocab_file
| 502
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess

from packaging.version import Version, parse

from accelerate.commands.config.config_args import default_config_file, load_config_from_file

# Text shown for the `accelerate tpu-config` sub-command. The dump bound this
# only to `lowerCAmelCase__` (with a `List[Any]` annotation that would raise a
# NameError at import, since typing is not imported here) while the parser
# below reads `_description`; both names are provided.
_description = """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
lowerCAmelCase__ = _description
def _a ( __lowerCAmelCase : Any=None ):
    """Build the argument parser for `accelerate tpu-config`.

    When `__lowerCAmelCase` (the parent CLI's subparsers action) is given, the
    command is registered on it; otherwise a standalone parser is created.
    Returns the parser.

    NOTE(review): restored from the upstream `tpu_command_parser` — the dump
    left `parser` unbound and passed the subparsers object as the
    `type`/`default`/`func` values of several arguments.
    """
    subparsers = __lowerCAmelCase
    if subparsers is not None:
        parser = subparsers.add_parser('''tpu-config''' , description=_description )
    else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
    config_args.add_argument(
        '''--config_file''' , type=str , default=None , help='''Path to the config file to use for accelerate.''' , )
    config_args.add_argument(
        '''--tpu_name''' , default=None , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
    config_args.add_argument(
        '''--tpu_zone''' , default=None , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
    pod_args.add_argument(
        '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
    pod_args.add_argument(
        '''--command_file''' , default=None , help='''The path to the file containing the commands to run on the pod on startup.''' , )
    pod_args.add_argument(
        '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
    pod_args.add_argument(
        '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
    pod_args.add_argument(
        '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
    pod_args.add_argument(
        '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
    if subparsers is not None:
        # NOTE(review): the launcher was also renamed to `_a` in this dump;
        # the upstream callback name is used here and must be restored file-wide.
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def _a ( __lowerCAmelCase : Optional[Any] ):
    """Run the configured startup commands on a TPU pod via `gcloud ... ssh`.

    `__lowerCAmelCase` is the parsed argparse namespace. With `--debug` the
    assembled gcloud command is printed instead of executed.

    NOTE(review): restored from the upstream `tpu_command_launcher` — the dump
    left `args`/`defaults`/`new_cmd`/`cmd` unbound, tested
    `os.path.isfile(args)` instead of the default config path, applied config
    defaults even when none were loaded, and compared against an undefined
    name instead of `Version`.
    """
    args = __lowerCAmelCase
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        args.accelerate_version = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = F"""accelerate=={args.accelerate_version}"""
    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
    if args.command_file:
        with open(args.command_file , '''r''' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [F"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    args.command = '''; '''.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['''gcloud''']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(F"""Running {' '.join(cmd )}""" )
        return
    subprocess.run(cmd )
    print('''Successfully setup pod.''' )
def _a ( ):
    """CLI entry point: parse `tpu-config` args and launch the pod setup.

    NOTE(review): `tpu_command_parser` / `tpu_command_launcher` are unresolved
    in this dump (both were renamed to `_a`); restore the original names
    file-wide before running.
    """
    parser = tpu_command_parser()  # fix: the result was bound to a throwaway name while `parser` was used below
    args = parser.parse_args()
    tpu_command_launcher(args )
| 502
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
# NOTE(review): the next assignment reuses `_lowercase`, clobbering the logger
# bound above; upstream names these `logger` and
# `DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP`.
# Map of pretrained checkpoint name -> hosted config URL.
_lowercase = {
    """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
    """distilbert-base-uncased-distilled-squad""": (
        """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
    ),
    """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
    """distilbert-base-cased-distilled-squad""": (
        """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
    ),
    """distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
    """distilbert-base-multilingual-cased""": (
        """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
    ),
    """distilbert-base-uncased-finetuned-sst-2-english""": (
        """https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
    ),
}
class lowerCamelCase__ ( PretrainedConfig ):
    """DistilBERT model configuration.

    NOTE(review): restored from the upstream `DistilBertConfig` — the dump
    gave every __init__ parameter the same name (a SyntaxError), bound the
    hyperparameters to locals instead of `self`, clobbered `model_type` with
    `attribute_map`, and inherited from an undefined name instead of
    `PretrainedConfig`.
    """

    model_type = "distilbert"
    # Map generic config attribute names onto DistilBERT's field names.
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class lowerCamelCase__ ( OnnxConfig ):
    """ONNX export configuration for DistilBERT (no `token_type_ids` input).

    NOTE(review): this class shadows the config class above because the dump
    gave both the same name; rename one of them when restoring the file. The
    base class was also an undefined name instead of `OnnxConfig`.
    """

    @property
    def inputs(self):
        # fix: the dump bound the axes dict to a throwaway local (leaving
        # `dynamic_axis` undefined) and renamed the abstract `inputs` property,
        # so the ONNX export machinery could not find it.
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 306
|
"""simple docstring"""
import re
import string
import numpy as np
import datasets
# Metric documentation constants. The obfuscated source assigned all three strings to
# the same name `a__`, while the decorator and `datasets.MetricInfo` below reference
# `_DESCRIPTION`, `_KWARGS_DESCRIPTION` and `_CITATION` — the real names are restored.
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
# NOTE: the `ignore_numbers` line previously said "removes all punctuation"
# (copy-paste error from `ignore_punctuation`); fixed to "removes all numbers".
_KWARGS_DESCRIPTION = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
# No citation for this metric.
_CITATION = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__(datasets.Metric):
    """Exact-match metric: percentage of predictions equal to their reference string.

    Both methods carried the same obfuscated name `_lowerCamelCase` (so the second
    definition clobbered the first); restored to the `_info`/`_compute` names the
    `datasets.Metric` API dispatches on. `_compute`'s normalized arrays and
    translation tables had likewise been collapsed into a single throwaway local,
    so the case/punctuation/number options never took effect — reconstructed.
    """

    def _info(self):
        """Declare metric metadata and the expected input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return ``{"exact_match": rate}`` with rate in [0.0, 100.0].

        Normalization steps (regex removal, lowercasing, punctuation/number
        stripping) are applied symmetrically to predictions and references.
        """
        if regexes_to_ignore is not None:
            # Strip each ignored pattern before any other normalization.
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 589
| 0
|
def _a(x_points, y_points, xa):
    """Evaluate at ``xa`` the interpolating polynomial through ``(x_points, y_points)``
    using Neville's iterated-interpolation scheme.

    Returns ``[value, table]`` where ``value`` is the interpolated value at ``xa``
    and ``table`` is the full Neville tableau (``table[j][i]`` is the order-``i``
    estimate built from points ``j-i .. j``).

    The obfuscated original declared duplicate parameter names (a SyntaxError) and
    referenced undefined names; it also seeded the sample values into column 1
    while only computing columns ``2..n-1``, so the first sample point never
    influenced the result. Fixed: values are seeded into column 0 and all ``n-1``
    orders are computed, giving the true degree-``n-1`` interpolant.
    """
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    # Column 0 holds the known function values (order-0 estimates).
    for i in range(n):
        q[i][0] = y_points[i]
    # Each further column combines two neighbouring lower-order estimates.
    for i in range(1, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i])
    return [q[n - 1][n - 1], q]


# Public, descriptive alias (`_a` is private to star-imports and uninformative).
neville_interpolate = _a


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 719
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    """Unit tests for the `.lib` Vector/Matrix linear-algebra helpers.

    In the obfuscated original every test method was named ``lowerCamelCase_`` (so
    all but the last were clobbered and none were discoverable by unittest), local
    variables were collapsed into ``lowercase__``, and several assertions referenced
    the undefined name ``UpperCamelCase_``. Reconstructed with distinct ``test_*``
    names and the locals the assertions actually use. The class was also renamed
    from ``_a``, which shadowed the interpolation function of the same name.
    """

    def test_component(self) -> None:
        """component() returns the entry at the given index."""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()  # constructing an empty vector must not raise

    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component_vector(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        # The original passed 0.01 in assertEqual's `msg` slot (where it was
        # silently ignored); an explicit tolerance is what was intended.
        self.assertAlmostEqual(7, a.component(2, 1), delta=0.01)

    def test_add_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
| 429
| 0
|
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
# Module-level logger (restored conventional name; the obfuscated `UpperCAmelCase_`
# was immediately reassigned by the constants below, losing the logger).
logger = logging.get_logger(__name__)
# Tokenizer resource constants. In the obfuscated source all ten constants were
# assigned to the same name `UpperCAmelCase_`, each overwriting the last, while the
# tokenizer classes below reference VOCAB_FILES_NAMES, the *_PRETRAINED_VOCAB_FILES_MAP,
# *_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and *_PRETRAINED_INIT_CONFIGURATION names —
# restored here so those references resolve.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Pretrained vocabulary/tokenizer file locations for the DPR context encoders.
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
# Pretrained vocabulary/tokenizer file locations for the DPR question encoders.
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
# Pretrained vocabulary/tokenizer file locations for the DPR readers.
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input lengths (positional-embedding sizes) per checkpoint.
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

# Default tokenizer init kwargs per checkpoint.
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""Tokenizer for DPR context encoders — end-to-end tokenization identical to
    :class:`BertTokenizer`, with DPR pretrained resources.

    Restored from the obfuscated `class lowerCAmelCase(__UpperCamelCase)` (the class
    name was duplicated across all tokenizers in this file and the base class name
    was undefined; `BertTokenizer` is imported above). Attribute names follow the
    standard tokenizer class attributes the RHS constants are keyed for.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""Tokenizer for DPR question encoders — end-to-end tokenization identical to
    :class:`BertTokenizer`, with DPR pretrained resources.

    Restored from the obfuscated duplicate `class lowerCAmelCase(__UpperCamelCase)`;
    see the context-encoder tokenizer above for details.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
# Output record types. The obfuscated source bound both namedtuples to the same name
# `UpperCAmelCase_`, while `decode_best_spans` below constructs `DPRSpanPrediction` —
# the real names (matching the namedtuple typenames) are restored.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
# Docstring injected into the reader tokenizer's __call__ via the
# add_start_docstrings / add_end_docstrings decorators below. Restored a real name
# (the obfuscated `UpperCAmelCase_` was clobbered by other constants); the string
# content is reproduced byte-for-byte.
CUSTOM_DPR_READER_DOCSTRING = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """Mixin adding DPR-reader span encoding/decoding on top of a BERT-style tokenizer.

    Restored from obfuscation: both public helpers were named `lowerCAmelCase` (the
    second clobbered the first, and the internal call to `self._get_best_spans`
    could never resolve), and all locals were collapsed into `__snake_case`.
    """

    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ):
        """Encode question/passage pairs as `[CLS] question [SEP] title [SEP] text`."""
        # No passages at all: behave like the plain tokenizer.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        # Only one of titles/texts: encode it as the second sequence of a pair.
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # A single question may be shared across all passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # Attend to every non-padding token.
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input,
        reader_output,
        num_spans=16,
        max_answer_length=64,
        num_spans_per_passage=4,
    ):
        """Pick the best answer spans across passages, most-relevant passages first.

        Returns at most `num_spans` `DPRSpanPrediction` records, scored by
        start-logit + end-logit, with passage relevance as a secondary score.
        """
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                # Span indices are relative to the passage slice; shift them back.
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length,
        top_spans,
    ):
        """Return up to `top_spans` non-overlapping (start, end) index pairs within one
        passage, ranked by start-logit + end-logit."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda pair: pair[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            # Skip spans overlapping an already-chosen (higher-scoring) span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    r"""Tokenizer for DPR readers: BERT tokenization plus the reader-specific
    span encoding/decoding from `CustomDPRReaderTokenizerMixin`.

    Restored from the obfuscated duplicate `class lowerCAmelCase(__UpperCamelCase,
    __UpperCamelCase)`; attribute names follow the standard tokenizer class attributes.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 24
|
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class _SCREAMING_SNAKE_CASE :
    # NOTE(review): obfuscated model-tester helper — instantiated below as
    # `TFEfficientFormerModelTester(self)`, so that is presumably its real name; confirm.
    # WARNING(review): the signature repeats the parameter name `lowerCamelCase` for every
    # argument (duplicate argument names are a SyntaxError), and the body binds every value
    # to the same local `snake_case__` instead of `self.<attr>`, while the other methods
    # read `self.batch_size`, `self.parent`, etc. The intended mapping of defaults to
    # parameter names cannot be fully recovered from this chunk (there is one more default
    # than attribute assignments) — left byte-identical rather than guessed.
    def __init__( self , lowerCamelCase , lowerCamelCase = 13 , lowerCamelCase = 64 , lowerCamelCase = 2 , lowerCamelCase = 3 , lowerCamelCase = 3 , lowerCamelCase = True , lowerCamelCase = True , lowerCamelCase = 1_28 , lowerCamelCase=[16, 32, 64, 1_28] , lowerCamelCase = 7 , lowerCamelCase = 4 , lowerCamelCase = 37 , lowerCamelCase = "gelu" , lowerCamelCase = 0.1 , lowerCamelCase = 0.1 , lowerCamelCase = 10 , lowerCamelCase = 0.0_2 , lowerCamelCase = 2 , lowerCamelCase = 1 , lowerCamelCase = 1_28 , lowerCamelCase = [2, 2, 2, 2] , lowerCamelCase = 2 , lowerCamelCase = 2 , ):
        # Each line below was presumably `self.<name> = <name>` for the matching parameter.
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = image_size
        snake_case__ = patch_size
        snake_case__ = num_channels
        snake_case__ = is_training
        snake_case__ = use_labels
        snake_case__ = hidden_size
        snake_case__ = num_hidden_layers
        snake_case__ = num_attention_heads
        snake_case__ = intermediate_size
        snake_case__ = hidden_act
        snake_case__ = hidden_dropout_prob
        snake_case__ = attention_probs_dropout_prob
        snake_case__ = type_sequence_label_size
        snake_case__ = initializer_range
        snake_case__ = encoder_stride
        snake_case__ = num_attention_outputs
        snake_case__ = embed_dim
        # seq_length is one more than embed_dim (presumably patch tokens + CLS-style token).
        snake_case__ = embed_dim + 1
        snake_case__ = resolution
        snake_case__ = depths
        snake_case__ = hidden_sizes
        snake_case__ = dim
        snake_case__ = mlp_expansion_ratio
    # NOTE(review): all methods below share the obfuscated name `A_`; later definitions
    # overwrite earlier ones, so only the last one survives at runtime. From their bodies
    # they correspond to prepare_config_and_inputs / get_config / create_and_check_model /
    # create_and_check_for_image_classification / prepare_config_and_inputs_for_common.
    def A_ ( self ):
        # Builds random pixel inputs (and labels when use_labels) plus a config.
        snake_case__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case__ = None
        if self.use_labels:
            snake_case__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case__ = self.get_config()
        # NOTE(review): `config`, `pixel_values`, `labels` are not bound above
        # (assignments were collapsed into `snake_case__`) — mangled by obfuscation.
        return config, pixel_values, labels
    def A_ ( self ):
        # Builds the model config from the tester's attributes.
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
        # Checks the base model's output shape against (batch, seq, hidden).
        snake_case__ = TFEfficientFormerModel(config=lowerCamelCase )
        snake_case__ = model(lowerCamelCase , training=lowerCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
        # Checks the classification head's logits shape, then again for greyscale input.
        snake_case__ = self.type_sequence_label_size
        snake_case__ = TFEfficientFormerForImageClassification(lowerCamelCase )
        snake_case__ = model(lowerCamelCase , labels=lowerCamelCase , training=lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        snake_case__ = 1
        snake_case__ = TFEfficientFormerForImageClassification(lowerCamelCase )
        snake_case__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        snake_case__ = model(lowerCamelCase , labels=lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def A_ ( self ):
        # Packs config and inputs into the dict shape the common test mixin expects.
        snake_case__ = self.prepare_config_and_inputs()
        snake_case__ , snake_case__ , snake_case__ = config_and_inputs
        snake_case__ = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
_A : str = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_A : List[str] = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_A : Optional[Any] = False
_A : List[Any] = False
_A : Tuple = False
_A : List[Any] = False
_A : Any = False
def A_ ( self ):
snake_case__ = TFEfficientFormerModelTester(self )
snake_case__ = ConfigTester(
self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def A_ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def A_ ( self ):
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def A_ ( self ):
pass
def A_ ( self ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = model_class(lowerCamelCase )
snake_case__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ = [*signature.parameters.keys()]
snake_case__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def A_ ( self ):
def check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
snake_case__ = model_class(lowerCamelCase )
snake_case__ = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
snake_case__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case__ = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
if hasattr(self.model_tester , "encoder_seq_length" ):
snake_case__ = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
snake_case__ = seq_length * self.model_tester.chunk_length
else:
snake_case__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
snake_case__ = outputs.decoder_hidden_states
self.asseretIsInstance(lowerCamelCase , (list, tuple) )
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
snake_case__ = getattr(self.model_tester , "seq_length" , lowerCamelCase )
snake_case__ = getattr(self.model_tester , "decoder_seq_length" , lowerCamelCase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def A_ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ):
snake_case__ = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A_ ( self ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def A_ ( self ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase )
def A_ ( self ):
snake_case__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def A_ ( self ):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ = TFEfficientFormerModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def A_ ( self ):
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ = True
snake_case__ = getattr(self.model_tester , "seq_length" , lowerCamelCase )
snake_case__ = getattr(self.model_tester , "encoder_seq_length" , lowerCamelCase )
snake_case__ = getattr(self.model_tester , "key_length" , lowerCamelCase )
snake_case__ = getattr(self.model_tester , "chunk_length" , lowerCamelCase )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
snake_case__ = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
snake_case__ = True
snake_case__ = False
snake_case__ = True
snake_case__ = model_class(lowerCamelCase )
snake_case__ = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
snake_case__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case__ = True
snake_case__ = model_class(lowerCamelCase )
snake_case__ = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) , training=lowerCamelCase )
snake_case__ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def A_ ( self ):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
snake_case__ , snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
snake_case__ = model_class(lowerCamelCase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
snake_case__ = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCamelCase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
snake_case__ = model(lowerCamelCase )
self.assertTrue(outputs_dict is not None )
def SCREAMING_SNAKE_CASE__ ( ):
    """Load the COCO fixture image shared by the integration tests below."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_tf
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration tests that run the published EfficientFormer-L1
    checkpoints end-to-end on a fixture image and pin the first logits.

    BUGFIX: locals were collapsed to one obfuscated name (NameError on
    `model`, `outputs`, ...), the property was named `A_` while the body
    read `self.default_image_processor`, and all three methods shared one
    name so only the last survived — restored working names.
    """

    @cached_property
    def default_image_processor(self):
        # None when vision extras are missing so @require_vision can skip.
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        """Classification head: check logits shape and first three values."""
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.0_5_5_5, 0.4_8_2_5, -0.0_8_5_2] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        """Teacher-distillation head: same checks for the WithTeacher model."""
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="tf" )
        # forward pass
        outputs = model(**inputs , training=False )
        # verify the logits
        expected_shape = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
# ---- chunk boundary (dataset artifact; non-Python residue removed) ----
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both statements bind the same name `snake_case`, so the
# logger is immediately shadowed by the checkpoint->config-URL map below.
# The identifiers look obfuscated — confirm the intended names (a module
# logger and CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP).
snake_case = logging.get_logger(__name__)
# Map of checkpoint names to their hosted config.json files.
snake_case = {
    """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
    """umberto-commoncrawl-cased-v1""": (
        """https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
    ),
    """umberto-wikipedia-uncased-v1""": (
        """https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
    ),
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    """Configuration for CamemBERT models (a RoBERTa-style architecture).

    Stores the standard transformer hyper-parameters and forwards the
    special-token ids to the parent constructor.
    """

    # presumably the `model_type` identifier — obfuscated attribute name
    # kept for backward compatibility; TODO confirm.
    UpperCamelCase_ = '''camembert'''

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # BUGFIX: the original parameter list reused one placeholder name for
        # every argument (a SyntaxError: duplicate argument), and every
        # assignment below targeted a local instead of `self` — restored
        # descriptive parameter names and instance attributes.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    """ONNX export configuration: declares the model's dynamic input axes."""

    @property
    def _A ( self : Optional[int] ):
        """Return the ONNX input spec mapping input names to symbolic axes."""
        # Multiple-choice tasks carry an extra per-example `choice` axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        # BUGFIX: the axis dict was bound to an obfuscated name while the
        # return statement referenced `dynamic_axis` — a guaranteed NameError.
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
# ---- chunk boundary (dataset artifact; non-Python residue removed) ----
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both statements bind the same name `snake_case` (the logger
# is shadowed by the URL map) — identifiers look obfuscated; confirm the
# intended names (module logger and MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP).
snake_case = logging.get_logger(__name__)
# Map of checkpoint names to their hosted config.json files.
snake_case = {
    """microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
    """microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    """Configuration for MarkupLM models.

    Extends the usual BERT-style hyper-parameters with the XPath embedding
    settings MarkupLM uses to encode each token's position in the HTML/XML
    tree (tag and subscript unit embeddings, maximum tree depth).
    """

    # presumably the `model_type` identifier — obfuscated attribute name
    # kept for backward compatibility; TODO confirm.
    UpperCamelCase_ = '''markuplm'''

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # BUGFIX: the original parameter list reused one placeholder name for
        # every argument (a SyntaxError: duplicate argument), and every
        # assignment below targeted a local instead of `self` — restored
        # descriptive parameter names and instance attributes.
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties (XPath tree-position embeddings)
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
# ---- chunk boundary (dataset artifact; non-Python residue removed) ----
'''Lazy import structure for the XLM-RoBERTa-XL model family.'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> public symbols; consumed by _LazyModule below.
_UpperCAmelCase = {
    '''configuration_xlm_roberta_xl''': [
        '''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''XLMRobertaXLConfig''',
        '''XLMRobertaXLOnnxConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # BUGFIX: the modeling exports previously *rebound* the import-structure
    # name to a bare list, discarding the configuration exports; register
    # them under their submodule key instead.
    _UpperCAmelCase['''modeling_xlm_roberta_xl'''] = [
        '''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XLMRobertaXLForCausalLM''',
        '''XLMRobertaXLForMaskedLM''',
        '''XLMRobertaXLForMultipleChoice''',
        '''XLMRobertaXLForQuestionAnswering''',
        '''XLMRobertaXLForSequenceClassification''',
        '''XLMRobertaXLForTokenClassification''',
        '''XLMRobertaXLModel''',
        '''XLMRobertaXLPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    # BUGFIX: the lazy module must replace this module in sys.modules so
    # attribute access triggers the deferred imports; previously the
    # _LazyModule instance was only bound to a throwaway variable and was
    # built from an undefined `_import_structure` name (NameError).
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _UpperCAmelCase)
# ---- chunk boundary (dataset artifact; non-Python residue removed) ----
'''simple docstring'''
import unittest
import numpy as np
def _UpperCamelCase ( mat_a: np.ndarray , mat_b: np.ndarray , mat_c: np.ndarray , pseudo_inv: np.ndarray | None = None , ) -> np.ndarray:
    """Compute the Schur complement ``C - B^T A^{-1} B`` of ``[[A, B], [B^T, C]]``.

    Args:
        mat_a: square block ``A``; must have as many rows as ``B``.
        mat_b: off-diagonal block ``B``.
        mat_c: lower-right block ``C``; must have as many columns as ``B``.
        pseudo_inv: optional precomputed inverse of ``A``; when omitted,
            ``A`` is inverted with ``np.linalg.inv``.

    Returns:
        The Schur complement as a new ndarray.

    Raises:
        ValueError: if the block shapes are incompatible or ``A`` is singular.
    """
    # BUGFIX: the original signature reused one placeholder name for all four
    # parameters — a SyntaxError (duplicate argument).
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            """Expected the same number of rows for A and B. """
            F'''Instead found A of size {shape_a} and B of size {shape_b}'''
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            """Expected the same number of columns for B and C. """
            F'''Instead found B of size {shape_b} and C of size {shape_c}'''
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError as err:
            # BUGFIX: chain the original numpy error for easier debugging.
            raise ValueError(
                """Input matrix A is not invertible. Cannot compute Schur complement.""" ) from err
    return mat_c - mat_b.T @ a_inv @ mat_b


# Backward-compatible public alias matching the name used by the test class
# below (which previously raised NameError because no such name existed).
schur_complement = _UpperCamelCase
class lowerCAmelCase_ ( unittest.TestCase ):
    """Unit tests for the Schur-complement helper defined above.

    BUGFIX: the three methods previously shared one name (so two of them
    were shadowed and never ran), bound every array to one collapsed local,
    and called undefined names — restored unique ``test_*`` methods with
    distinct locals that call ``_UpperCamelCase`` directly.
    """

    def test_schur_complement(self):
        """det([[A, B], [B.T, C]]) must equal det(A) * det(S)."""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = _UpperCamelCase(a, b, c)
        block_matrix = np.block([[a, b], [b.T, c]])
        det_block = np.linalg.det(block_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_block, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        """Incompatible A/B blocks must raise ValueError."""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            _UpperCamelCase(b, a, c)

    def test_improper_b_c_dimensions(self):
        """B and C with different column counts must raise ValueError."""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            _UpperCamelCase(a, b, c)
# Script entry point: run module doctests first, then the unittest suite.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
# ---- chunk boundary (dataset artifact; non-Python residue removed) ----
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase_ (unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
UpperCAmelCase_ : str = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
UpperCAmelCase_ : Optional[int] = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
UpperCAmelCase_ : int = {
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
UpperCAmelCase_ : Dict = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 16_000,
"return_attention_mask": False,
"do_normalize": True,
}
UpperCAmelCase_ : Optional[Any] = tempfile.mkdtemp()
UpperCAmelCase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ : List[str] = os.path.join(self.tmpdirname , lowerCAmelCase_ )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + "\n" )
with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + "\n" )
# load decoder from hub
UpperCAmelCase_ : Any = "hf-internal-testing/ngram-beam-search-decoder"
def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : str = self.add_kwargs_tokens_map.copy()
kwargs.update(lowerCAmelCase_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , **lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , **lowerCAmelCase_ : Union[str, Any] ) -> List[Any]:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : str = self.get_feature_extractor()
UpperCAmelCase_ : List[str] = self.get_decoder()
UpperCAmelCase_ : Any = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : List[Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowerCAmelCase_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCAmelCase_ : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(lowerCAmelCase_ , "include" ):
WavaVecaProcessorWithLM(
tokenizer=lowerCAmelCase_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = self.get_feature_extractor()
UpperCAmelCase_ : str = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = self.get_decoder()
UpperCAmelCase_ : int = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = floats_list((3, 1_000) )
UpperCAmelCase_ : int = feature_extractor(lowerCAmelCase_ , return_tensors="np" )
UpperCAmelCase_ : List[str] = processor(lowerCAmelCase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = self.get_feature_extractor()
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_decoder()
UpperCAmelCase_ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
UpperCAmelCase_ : Any = "This is a test string"
UpperCAmelCase_ : Union[str, Any] = processor(text=lowerCAmelCase_ )
UpperCAmelCase_ : int = tokenizer(lowerCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : str=(2, 10, 16) , lowerCAmelCase_ : Dict=77 ) -> Optional[int]:
np.random.seed(lowerCAmelCase_ )
return np.random.rand(*lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
UpperCAmelCase_ : Any = self.get_feature_extractor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = self.get_decoder()
UpperCAmelCase_ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
UpperCAmelCase_ : Union[str, Any] = processor.decode(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = decoder.decode_beams(lowerCAmelCase_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Dict ) -> List[str]:
UpperCAmelCase_ : int = self.get_feature_extractor()
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ : List[Any] = self.get_decoder()
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCAmelCase_ : Tuple = processor.batch_decode(lowerCAmelCase_ )
else:
with get_context(lowerCAmelCase_ ).Pool() as pool:
UpperCAmelCase_ : Tuple = processor.batch_decode(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = list(lowerCAmelCase_ )
with get_context("fork" ).Pool() as p:
UpperCAmelCase_ : str = decoder.decode_beams_batch(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.logit_score )
self.assertListEqual(lowerCAmelCase_ , decoded_processor.lm_score )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
UpperCAmelCase_ : List[Any] = self.get_feature_extractor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : int = self.get_decoder()
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
UpperCAmelCase_ : str = self._get_dummy_logits()
UpperCAmelCase_ : Dict = 15
UpperCAmelCase_ : int = -2_0.0
UpperCAmelCase_ : Any = -4.0
UpperCAmelCase_ : Dict = processor.batch_decode(
lowerCAmelCase_ , beam_width=lowerCAmelCase_ , beam_prune_logp=lowerCAmelCase_ , token_min_logp=lowerCAmelCase_ , )
UpperCAmelCase_ : Dict = decoded_processor_out.text
UpperCAmelCase_ : List[str] = list(lowerCAmelCase_ )
with get_context("fork" ).Pool() as pool:
UpperCAmelCase_ : Dict = decoder.decode_beams_batch(
lowerCAmelCase_ , lowerCAmelCase_ , beam_width=lowerCAmelCase_ , beam_prune_logp=lowerCAmelCase_ , token_min_logp=lowerCAmelCase_ , )
UpperCAmelCase_ : Union[str, Any] = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase_ : List[str] = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase_ : Optional[int] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , lowerCAmelCase_ )
self.assertTrue(np.array_equal(lowerCAmelCase_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , lowerCAmelCase_ , atol=1e-3 ) )
self.assertTrue(np.array_equal(lowerCAmelCase_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , lowerCAmelCase_ , atol=1e-3 ) )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
UpperCAmelCase_ : int = self.get_feature_extractor()
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = self.get_decoder()
UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = self._get_dummy_logits()
UpperCAmelCase_ : Dict = 2.0
UpperCAmelCase_ : Union[str, Any] = 5.0
UpperCAmelCase_ : str = -2_0.0
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : str = processor.batch_decode(
lowerCAmelCase_ , alpha=lowerCAmelCase_ , beta=lowerCAmelCase_ , unk_score_offset=lowerCAmelCase_ , lm_score_boundary=lowerCAmelCase_ , )
UpperCAmelCase_ : List[Any] = decoded_processor_out.text
UpperCAmelCase_ : str = list(lowerCAmelCase_ )
decoder.reset_params(
alpha=lowerCAmelCase_ , beta=lowerCAmelCase_ , unk_score_offset=lowerCAmelCase_ , lm_score_boundary=lowerCAmelCase_ , )
with get_context("fork" ).Pool() as pool:
UpperCAmelCase_ : Dict = decoder.decode_beams_batch(
lowerCAmelCase_ , lowerCAmelCase_ , )
UpperCAmelCase_ : Any = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : Tuple = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
UpperCAmelCase_ : List[str] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase_ : Any = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
UpperCAmelCase_ : Dict = os.listdir(lowerCAmelCase_ )
UpperCAmelCase_ : int = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = snapshot_download("hf-internal-testing/processor_with_lm" )
UpperCAmelCase_ : int = WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase_ : str = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
UpperCAmelCase_ : Any = os.listdir(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = os.listdir(lowerCAmelCase_ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ : int = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
UpperCAmelCase_ : Optional[Any] = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
UpperCAmelCase_ : List[Any] = floats_list((3, 1_000) )
UpperCAmelCase_ : Union[str, Any] = processor_wavaveca(lowerCAmelCase_ , return_tensors="np" )
UpperCAmelCase_ : List[str] = processor_auto(lowerCAmelCase_ , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
UpperCAmelCase_ : Optional[int] = self._get_dummy_logits()
UpperCAmelCase_ : str = processor_wavaveca.batch_decode(lowerCAmelCase_ )
UpperCAmelCase_ : int = processor_auto.batch_decode(lowerCAmelCase_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
UpperCAmelCase_ : Dict = self.get_feature_extractor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : str = self.get_decoder()
UpperCAmelCase_ : Dict = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str ) -> Optional[int]:
UpperCAmelCase_ : str = [d[key] for d in offsets]
return retrieved_list
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
UpperCAmelCase_ : Optional[int] = self._get_dummy_logits()[0]
UpperCAmelCase_ : List[Any] = processor.decode(lowerCAmelCase_ , output_word_offsets=lowerCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
UpperCAmelCase_ : int = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
UpperCAmelCase_ : List[str] = self._get_dummy_logits()
UpperCAmelCase_ : Optional[int] = processor.batch_decode(lowerCAmelCase_ , output_word_offsets=lowerCAmelCase_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(lowerCAmelCase_ , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
    """Integration test: decode a real Common Voice sample and check words and timestamps."""
    import torch
    # stream one audio sample resampled to 16 kHz
    UpperCAmelCase_ : Optional[Any] = load_dataset("common_voice" , "en" , split="train" , streaming=lowerCAmelCase_ )
    UpperCAmelCase_ : int = ds.cast_column("audio" , datasets.Audio(sampling_rate=16_000 ) )
    UpperCAmelCase_ : Optional[Any] = iter(lowerCAmelCase_ )
    UpperCAmelCase_ : int = next(lowerCAmelCase_ )
    UpperCAmelCase_ : Any = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
    UpperCAmelCase_ : Dict = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
    # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
    UpperCAmelCase_ : Union[str, Any] = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
    with torch.no_grad():
        UpperCAmelCase_ : Union[str, Any] = model(lowerCAmelCase_ ).logits.cpu().numpy()
    UpperCAmelCase_ : List[str] = processor.decode(logits[0] , output_word_offsets=lowerCAmelCase_ )
    # convert model-frame offsets to seconds
    UpperCAmelCase_ : Tuple = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
    UpperCAmelCase_ : Tuple = [
        {
            "start_time": d["start_offset"] * time_offset,
            "end_time": d["end_offset"] * time_offset,
            "word": d["word"],
        }
        for d in output["word_offsets"]
    ]
    # NOTE(review): several locals here (`ds`, `sample`, `model`, `logits`, `output`,
    # `word_offsets`, `time_offset`) are read but only ever assigned to the mangled
    # name `UpperCAmelCase_` — confirm the intended bindings before running.
    UpperCAmelCase_ : List[Any] = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
    # output words
    self.assertEqual(" ".join(self.get_from_offsets(lowerCAmelCase_ , "word" ) ) , lowerCAmelCase_ )
    self.assertEqual(" ".join(self.get_from_offsets(lowerCAmelCase_ , "word" ) ) , output.text )
    # output times
    UpperCAmelCase_ : Tuple = torch.tensor(self.get_from_offsets(lowerCAmelCase_ , "start_time" ) )
    UpperCAmelCase_ : List[str] = torch.tensor(self.get_from_offsets(lowerCAmelCase_ , "end_time" ) )
    # fmt: off
    UpperCAmelCase_ : Optional[int] = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
    UpperCAmelCase_ : int = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
    # fmt: on
    # timestamps must match the reference within 10 ms
    self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=0.0_1 ) )
    self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=0.0_1 ) )
| 463
|
"""simple docstring"""
# Baconian cipher lookup table: every letter maps to a 5-symbol A/B code.
# Note 'j' and 'v' use the distinct codes of the classical 24-letter variant
# so they do not collide with 'i' and 'u'; a space maps to itself.
#
# Fix: both tables were previously assigned to the same mangled name
# (`lowerCamelCase_`), so the second assignment shadowed the first and the
# names `encode_dict` / `decode_dict` used by encode()/decode() were undefined.
encode_dict = {
    '''a''': '''AAAAA''',
    '''b''': '''AAAAB''',
    '''c''': '''AAABA''',
    '''d''': '''AAABB''',
    '''e''': '''AABAA''',
    '''f''': '''AABAB''',
    '''g''': '''AABBA''',
    '''h''': '''AABBB''',
    '''i''': '''ABAAA''',
    '''j''': '''BBBAA''',
    '''k''': '''ABAAB''',
    '''l''': '''ABABA''',
    '''m''': '''ABABB''',
    '''n''': '''ABBAA''',
    '''o''': '''ABBAB''',
    '''p''': '''ABBBA''',
    '''q''': '''ABBBB''',
    '''r''': '''BAAAA''',
    '''s''': '''BAAAB''',
    '''t''': '''BAABA''',
    '''u''': '''BAABB''',
    '''v''': '''BBBAB''',
    '''w''': '''BABAA''',
    '''x''': '''BABAB''',
    '''y''': '''BABBA''',
    '''z''': '''BABBB''',
    ''' ''': ''' ''',
}
# Inverse table used by decode(): 5-symbol code -> letter.
decode_dict = {value: key for key, value in encode_dict.items()}
def snake_case ( A__ ):
    """Encode a word/sentence into its Baconian-cipher representation.

    Args:
        A__: text containing only alphabet letters and spaces.

    Returns:
        The concatenated 5-symbol A/B codes for each character.

    Raises:
        Exception: if any character is neither a letter nor a space.
    """
    # Fix: the loop previously iterated over an undefined name `word` instead of
    # the parameter, raising NameError on every call.
    parts = []
    for letter in A__.lower():
        if letter.isalpha() or letter == " ":
            parts.append(encode_dict[letter])
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces" )
    # join once instead of repeated string concatenation
    return "".join(parts)
def snake_case ( A__ ):
    """Decode a Baconian-cipher string back into plain text.

    Args:
        A__: a string of 'A'/'B' symbols with spaces separating encoded words.

    Returns:
        The decoded text, with a single space between words.

    Raises:
        Exception: if the input contains anything other than 'A', 'B', or space.
    """
    if set(A__ ) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces" )
    decoded = ""
    # Fix: split the parameter (previously the undefined name `coded`), and make the
    # inner loop test the shrinking `word` — it previously tested `len(A__)`, which
    # never changes, so the loop ran past the word's end.
    for word in A__.split():
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
| 463
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
__UpperCamelCase : Dict = logging.get_logger(__name__)
# NOTE(review): this second assignment rebinds the same mangled name and therefore
# shadows the logger above — presumably these were two distinct module constants
# (a logger and a pretrained-config archive map); confirm the intended names.
__UpperCamelCase : Union[str, Any] = {
    '''microsoft/unispeech-sat-base-100h-libri-ft''': (
        '''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class a ( a__ ):
    """Configuration class for UniSpeechSat models.

    Holds architecture hyperparameters (encoder sizes, convolutional feature
    extractor layout), SpecAugment masking settings, quantizer/pretraining
    parameters, CTC-loss options, and classifier/XVector head sizes.

    NOTE(review): the base class `a__` is not defined in this file — presumably
    a mangled reference to `PretrainedConfig`; confirm before use.
    """

    # model_type identifier used by the auto-config machinery
    snake_case__ = '''unispeech-sat'''

    def __init__( self , _snake_case=32 , _snake_case=7_68 , _snake_case=12 , _snake_case=12 , _snake_case=30_72 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.1 , _snake_case=0.1 , _snake_case=0.02 , _snake_case=1E-5 , _snake_case="group" , _snake_case="gelu" , _snake_case=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , _snake_case=(5, 2, 2, 2, 2, 2, 2) , _snake_case=(10, 3, 3, 3, 3, 2, 2) , _snake_case=False , _snake_case=1_28 , _snake_case=16 , _snake_case=False , _snake_case=True , _snake_case=0.05 , _snake_case=10 , _snake_case=2 , _snake_case=0.0 , _snake_case=10 , _snake_case=0 , _snake_case=3_20 , _snake_case=2 , _snake_case=0.1 , _snake_case=1_00 , _snake_case=2_56 , _snake_case=2_56 , _snake_case=0.1 , _snake_case="mean" , _snake_case=False , _snake_case=False , _snake_case=2_56 , _snake_case=(5_12, 5_12, 5_12, 5_12, 15_00) , _snake_case=(5, 3, 3, 1, 1) , _snake_case=(1, 2, 3, 1, 1) , _snake_case=5_12 , _snake_case=0 , _snake_case=1 , _snake_case=2 , _snake_case=5_04 , **_snake_case , ):
        """Initialize the configuration; all positional parameters carry defaults.

        NOTE(review): every parameter is mangled to `_snake_case`, so only the
        last binding is visible in the body — the attribute assignments below
        show the intended parameter meanings; confirm against upstream.
        """
        super().__init__(**_snake_case , pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case )
        lowerCAmelCase = hidden_size
        lowerCAmelCase = feat_extract_norm
        lowerCAmelCase = feat_extract_activation
        # defensive copies of the conv-layout tuples
        lowerCAmelCase = list(_snake_case )
        lowerCAmelCase = list(_snake_case )
        lowerCAmelCase = list(_snake_case )
        lowerCAmelCase = conv_bias
        lowerCAmelCase = num_conv_pos_embeddings
        lowerCAmelCase = num_conv_pos_embedding_groups
        lowerCAmelCase = len(self.conv_dim )
        lowerCAmelCase = num_hidden_layers
        lowerCAmelCase = intermediate_size
        lowerCAmelCase = hidden_act
        lowerCAmelCase = num_attention_heads
        lowerCAmelCase = hidden_dropout
        lowerCAmelCase = attention_dropout
        lowerCAmelCase = activation_dropout
        lowerCAmelCase = feat_proj_dropout
        lowerCAmelCase = final_dropout
        lowerCAmelCase = layerdrop
        lowerCAmelCase = layer_norm_eps
        lowerCAmelCase = initializer_range
        lowerCAmelCase = vocab_size
        lowerCAmelCase = num_clusters
        lowerCAmelCase = do_stable_layer_norm
        lowerCAmelCase = use_weighted_layer_sum
        # the three conv-layout tuples must describe the same number of layers
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
                F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        lowerCAmelCase = apply_spec_augment
        lowerCAmelCase = mask_time_prob
        lowerCAmelCase = mask_time_length
        lowerCAmelCase = mask_time_min_masks
        lowerCAmelCase = mask_feature_prob
        lowerCAmelCase = mask_feature_length
        lowerCAmelCase = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        lowerCAmelCase = num_codevectors_per_group
        lowerCAmelCase = num_codevector_groups
        lowerCAmelCase = contrastive_logits_temperature
        lowerCAmelCase = feat_quantizer_dropout
        lowerCAmelCase = num_negatives
        lowerCAmelCase = codevector_dim
        lowerCAmelCase = proj_codevector_dim
        lowerCAmelCase = diversity_loss_weight
        # ctc loss
        lowerCAmelCase = ctc_loss_reduction
        lowerCAmelCase = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        lowerCAmelCase = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        lowerCAmelCase = list(_snake_case )
        lowerCAmelCase = list(_snake_case )
        lowerCAmelCase = list(_snake_case )
        lowerCAmelCase = xvector_output_dim

    @property
    def UpperCamelCase__ ( self ):
        """Total downsampling factor of the conv feature extractor (product of strides)."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 4
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __lowercase ( unittest.TestCase ):
    """Helper that builds dummy RoBERTa configs and inputs for the Flax model tests."""

    def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=4 , ):
        # NOTE(review): all parameters are mangled to `_UpperCAmelCase`; the attribute
        # names below show the intended parameter meanings — confirm against upstream.
        __a : Any = parent
        __a : Optional[int] = batch_size
        __a : str = seq_length
        __a : List[str] = is_training
        __a : Optional[Any] = use_attention_mask
        __a : Optional[Any] = use_token_type_ids
        __a : List[str] = use_labels
        __a : Union[str, Any] = vocab_size
        __a : int = hidden_size
        __a : Union[str, Any] = num_hidden_layers
        __a : Union[str, Any] = num_attention_heads
        __a : Dict = intermediate_size
        __a : List[str] = hidden_act
        __a : Dict = hidden_dropout_prob
        __a : Union[str, Any] = attention_probs_dropout_prob
        __a : int = max_position_embeddings
        __a : Tuple = type_vocab_size
        __a : Optional[int] = type_sequence_label_size
        __a : Optional[Any] = initializer_range
        __a : Optional[int] = num_choices

    def _lowerCamelCase ( self ):
        """Build a dummy config plus random input tensors (ids, type ids, mask)."""
        __a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __a : Union[str, Any] = None
        if self.use_attention_mask:
            __a : Any = random_attention_mask([self.batch_size, self.seq_length] )
        __a : Optional[int] = None
        if self.use_token_type_ids:
            __a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __a : Any = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def _lowerCamelCase ( self ):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        __a : Dict = self.prepare_config_and_inputs()
        __a , __a , __a , __a : str = config_and_inputs
        __a : str = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict

    def _lowerCamelCase ( self ):
        """Variant of the above for decoder (cross-attention) tests."""
        __a : Any = self.prepare_config_and_inputs()
        __a , __a , __a , __a : Union[str, Any] = config_and_inputs
        __a : Optional[int] = True
        __a : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        __a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
    """Runs the common Flax model-test suite over all RoBERTa head classes.

    NOTE(review): the mixin base `_UpperCamelCase` is not defined in this file —
    presumably a mangled `FlaxModelTesterMixin`; confirm before use.
    """

    # test missing keys when loading from a PyTorch checkpoint
    __lowerCAmelCase = True
    __lowerCAmelCase = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _lowerCamelCase ( self ):
        # NOTE(review): `FlaxRobertaModelTester` is not defined in this file (the helper
        # class above is itself name-mangled) — this would raise NameError as written.
        __a : Dict = FlaxRobertaModelTester(self )

    @slow
    def _lowerCamelCase ( self ):
        """Smoke-test: load each pretrained checkpoint and run a trivial forward pass."""
        for model_class_name in self.all_model_classes:
            __a : int = model_class_name.from_pretrained('''roberta-base''' , from_pt=_UpperCAmelCase )
            __a : List[str] = model(np.ones((1, 1) ) )
            self.assertIsNotNone(_UpperCAmelCase )
| 52
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase__ :
    """Helper that builds dummy BlipText configs and inputs for the TF model tests."""

    def __init__( self , _lowerCAmelCase , _lowerCAmelCase=12 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=32 , _lowerCAmelCase=2 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=0.02 , _lowerCAmelCase=0 , _lowerCAmelCase=None , ):
        # NOTE(review): parameters are mangled to `_lowerCAmelCase`; the attribute names
        # below show the intended parameter meanings — confirm against upstream.
        a =parent
        a =batch_size
        a =seq_length
        a =is_training
        a =use_input_mask
        a =use_labels
        a =vocab_size
        a =hidden_size
        a =projection_dim
        a =num_hidden_layers
        a =num_attention_heads
        a =intermediate_size
        a =dropout
        a =attention_dropout
        a =max_position_embeddings
        a =initializer_range
        a =scope
        a =bos_token_id

    def lowerCAmelCase__ ( self ):
        """Build random input ids and an attention mask with a random valid prefix per row."""
        a =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        a =None
        if self.use_input_mask:
            a =random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            a =input_mask.numpy()
            a , a =input_mask.shape
            # force each row to have ones up to a random start index, zeros after
            a =np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(_lowerCAmelCase ):
                a =1
                a =0
        a =self.get_config()
        return config, input_ids, tf.convert_to_tensor(_lowerCAmelCase )

    def lowerCAmelCase__ ( self ):
        """Assemble a BlipTextConfig from this tester's hyperparameters."""
        return BlipTextConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )

    def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
        """Run a forward pass (with and without mask) and check output shapes."""
        a =TFBlipTextModel(config=_lowerCAmelCase )
        a =model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , training=_lowerCAmelCase )
        a =model(_lowerCAmelCase , training=_lowerCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def lowerCAmelCase__ ( self ):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        a =self.prepare_config_and_inputs()
        a , a , a =config_and_inputs
        a ={"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
    """TF BlipText model test-suite entry point.

    NOTE(review): the first base is the mangled name `UpperCAmelCase__` itself,
    which at class-creation time resolves to the previously defined helper class —
    presumably this was `TFModelTesterMixin`; confirm before use.
    """

    _SCREAMING_SNAKE_CASE : str = (TFBlipTextModel,) if is_tf_available() else ()
    _SCREAMING_SNAKE_CASE : Tuple = False
    _SCREAMING_SNAKE_CASE : int = False
    _SCREAMING_SNAKE_CASE : Union[str, Any] = False

    def lowerCAmelCase__ ( self ):
        # NOTE(review): `BlipTextModelTester` is not defined in this file (the helper
        # class above is name-mangled) — this would raise NameError as written.
        a =BlipTextModelTester(self )
        a =ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 )

    def lowerCAmelCase__ ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCAmelCase__ ( self ):
        """Forward-pass shape test via the helper above."""
        a =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowerCAmelCase )

    def lowerCAmelCase__ ( self ):
        pass

    def lowerCAmelCase__ ( self ):
        pass

    @unittest.skip(reason="""Blip does not use inputs_embeds""" )
    def lowerCAmelCase__ ( self ):
        pass

    @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
    def lowerCAmelCase__ ( self ):
        pass

    @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
    def lowerCAmelCase__ ( self ):
        pass

    @slow
    def lowerCAmelCase__ ( self ):
        """Smoke-test loading each pretrained checkpoint."""
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a =TFBlipTextModel.from_pretrained(_lowerCAmelCase )
            self.assertIsNotNone(_lowerCAmelCase )

    def lowerCAmelCase__ ( self , _lowerCAmelCase=True ):
        # PT/TF weight equivalence may legitimately miss some keys for this model
        super().test_pt_tf_model_equivalence(allow_missing_keys=_lowerCAmelCase )
| 321
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class UpperCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
    """Tokenizer test-suite for the MVP (slow + fast) tokenizers.

    NOTE(review): the first base is the mangled name `UpperCAmelCase__` — at this
    point in the file that name is undefined; presumably it was the imported
    `TokenizerTesterMixin`. Confirm before running.
    """

    _SCREAMING_SNAKE_CASE : int = MvpTokenizer
    _SCREAMING_SNAKE_CASE : Union[str, Any] = MvpTokenizerFast
    _SCREAMING_SNAKE_CASE : Optional[int] = True
    _SCREAMING_SNAKE_CASE : Any = filter_roberta_detectors

    def lowerCAmelCase__ ( self ):
        """Write a tiny BPE vocab + merges file for the tokenizers under test."""
        super().setUp()
        a =[
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        a =dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
        a =["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        a ={"""unk_token""": """<unk>"""}
        a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        a =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(_lowerCAmelCase ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(_lowerCAmelCase ) )

    def lowerCAmelCase__ ( self , **_lowerCAmelCase ):
        """Instantiate the slow tokenizer from the temp vocab files."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )

    def lowerCAmelCase__ ( self , **_lowerCAmelCase ):
        """Instantiate the fast tokenizer from the temp vocab files."""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )

    def lowerCAmelCase__ ( self , _lowerCAmelCase ):
        """Provide an (input, expected) text pair for the common round-trip tests."""
        return "lower newer", "lower newer"

    @cached_property
    def lowerCAmelCase__ ( self ):
        return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" )

    @cached_property
    def lowerCAmelCase__ ( self ):
        return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" )

    @require_torch
    def lowerCAmelCase__ ( self ):
        """Batch-encode with padding and compare ids against the expected sequence."""
        a =["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        a =[0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a =tokenizer(_lowerCAmelCase , max_length=len(_lowerCAmelCase ) , padding=_lowerCAmelCase , return_tensors="""pt""" )
            self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            a =batch.input_ids.tolist()[0]
            self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
            # Test that special tokens are reset

    @require_torch
    def lowerCAmelCase__ ( self ):
        """Encoding without targets must not produce label tensors."""
        a =["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="""pt""" )
            # check if input_ids are returned and no labels
            self.assertIn("""input_ids""" , _lowerCAmelCase )
            self.assertIn("""attention_mask""" , _lowerCAmelCase )
            self.assertNotIn("""labels""" , _lowerCAmelCase )
            self.assertNotIn("""decoder_attention_mask""" , _lowerCAmelCase )

    @require_torch
    def lowerCAmelCase__ ( self ):
        """`text_target` with max_length padding yields fixed-width label ids."""
        a =[
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a =tokenizer(text_target=_lowerCAmelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
            self.assertEqual(32 , targets["""input_ids"""].shape[1] )

    @require_torch
    def lowerCAmelCase__ ( self ):
        """Over-long inputs are truncated to the model max length (1024)."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a =tokenizer(
                ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="""pt""" )
            self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
            self.assertEqual(batch.input_ids.shape , (2, 1_024) )

    @require_torch
    def lowerCAmelCase__ ( self ):
        """Both inputs and labels must be wrapped in BOS/EOS special tokens."""
        a =["""A long paragraph for summarization."""]
        a =[
            """Summary of the text.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            a =tokenizer(_lowerCAmelCase , text_target=_lowerCAmelCase , return_tensors="""pt""" )
            a =inputs["""input_ids"""]
            a =inputs["""labels"""]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    def lowerCAmelCase__ ( self ):
        pass

    def lowerCAmelCase__ ( self ):
        """Fast and slow tokenizers must agree on mask-token handling."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                a =self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
                a =self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
                a ="""A, <mask> AllenNLP sentence."""
                a =tokenizer_r.encode_plus(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
                a =tokenizer_p.encode_plus(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                a =tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                a =tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    _lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    _lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 321
| 1
|
def _A ( __snake_case :str ) -> List[Any]:
"""simple docstring"""
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def _A ( __snake_case :str ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = credit_card_number
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(lowerCamelCase__ ) - 2
for i in range(lowerCamelCase__ , -1 , -2 ):
# double the value of every second digit
__SCREAMING_SNAKE_CASE = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
__SCREAMING_SNAKE_CASE = cc_number[:i] + str(lowerCamelCase__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(lowerCamelCase__ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def _A ( __snake_case :str ) -> bool:
    """Validate a credit card number, printing the reason on failure.

    Checks, in order: all-digit content, length 13–16, issuer prefix, Luhn checksum.
    Returns True only if every check passes.

    Fix: the body previously referenced the undefined names `credit_card_number`
    and `lowerCamelCase__` instead of the parameter (NameError on every call);
    the return annotation is corrected to `bool`.
    NOTE(review): `validate_initial_digits` and `luhn_validation` are not defined
    under those names in this file as written (the helper functions above are all
    mangled to `_A`) — confirm the intended public names.
    """
    credit_card_number = __snake_case
    error_message = f'''{credit_card_number} is an invalid credit card number because'''
    if not credit_card_number.isdigit():
        print(f'''{error_message} it has nonnumerical characters.''' )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(f'''{error_message} of its length.''' )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(f'''{error_message} of its first two digits.''' )
        return False
    if not luhn_validation(credit_card_number ):
        print(f'''{error_message} it fails the Luhn check.''' )
        return False
    print(f'''{credit_card_number} is a valid credit card number.''' )
    return True
if __name__ == "__main__":
    # Run doctests, then demonstrate one valid and one invalid number.
    import doctest

    doctest.testmod()
    # NOTE(review): `validate_credit_card_number` is not defined under that name in
    # this file as written (the functions above are all mangled to `_A`) — confirm.
    validate_credit_card_number('4111111111111111')
    validate_credit_card_number('32323')
| 693
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the Informer model package.
#
# Fix: the configuration dict and the modeling list were previously assigned to
# the same mangled name (`__snake_case`), so the list overwrote the dict and the
# name `_import_structure` passed to `_LazyModule` was undefined; the lazy module
# was also bound to a throwaway variable instead of replacing this module in
# `sys.modules` (the standard transformers lazy-init pattern).
_import_structure = {
    'configuration_informer': [
        'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'InformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is unavailable: expose only the configuration objects
    pass
else:
    _import_structure['modeling_informer'] = [
        'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'InformerForPrediction',
        'InformerModel',
        'InformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    # real imports for static type checkers only
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )
else:
    import sys

    # defer all heavy imports until an attribute is first accessed
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 200
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a ( lowercase_ , unittest.TestCase ):
    """Fast (CPU, tiny-model) test-suite for the DDIM image-generation pipeline.

    NOTE(review): the mixin base `lowercase_` is not defined in this file —
    presumably a mangled `PipelineTesterMixin`; confirm before use.
    """

    __lowerCAmelCase : Any = DDIMPipeline
    __lowerCAmelCase : int = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    # DDIM takes no prompt/latents/callback optional params
    __lowerCAmelCase : List[str] = PipelineTesterMixin.required_optional_params - {
        """num_images_per_prompt""",
        """latents""",
        """callback""",
        """callback_steps""",
    }
    __lowerCAmelCase : Optional[Any] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    __lowerCAmelCase : Any = False

    def __UpperCamelCase ( self ) -> Union[str, Any]:
        """Build a tiny deterministic UNet + DDIM scheduler for fast tests."""
        torch.manual_seed(0 )
        _a : Dict = UNetaDModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        _a : str = DDIMScheduler()
        # NOTE(review): `unet`/`scheduler` are read here but only ever assigned to the
        # mangled local `_a` above — confirm the intended bindings.
        _a : Optional[Any] = {"""unet""": unet, """scheduler""": scheduler}
        return components

    def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_=0 ) -> str:
        """Return seeded pipeline call kwargs; MPS needs a CPU generator."""
        if str(lowerCamelCase_ ).startswith('mps' ):
            _a : int = torch.manual_seed(lowerCamelCase_ )
        else:
            _a : Tuple = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
        _a : int = {
            """batch_size""": 1,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    def __UpperCamelCase ( self ) -> Dict:
        """Run two inference steps on CPU and compare a pixel slice to a reference."""
        _a : Union[str, Any] = """cpu"""
        _a : Optional[int] = self.get_dummy_components()
        _a : Optional[Any] = self.pipeline_class(**lowerCamelCase_ )
        pipe.to(lowerCamelCase_ )
        pipe.set_progress_bar_config(disable=lowerCamelCase_ )
        _a : List[str] = self.get_dummy_inputs(lowerCamelCase_ )
        _a : Optional[int] = pipe(**lowerCamelCase_ ).images
        _a : Dict = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 3_2, 3_2, 3) )
        _a : Union[str, Any] = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
        _a : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(lowerCamelCase_ , 1e-3 )

    def __UpperCamelCase ( self ) -> Tuple:
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    def __UpperCamelCase ( self ) -> Union[str, Any]:
        super().test_save_load_local(expected_max_difference=3e-3 )

    def __UpperCamelCase ( self ) -> Tuple:
        super().test_save_load_optional_components(expected_max_difference=3e-3 )

    def __UpperCamelCase ( self ) -> Optional[Any]:
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
    """Slow GPU integration tests for DDIM against pretrained checkpoints."""

    def __UpperCamelCase ( self ) -> Dict:
        """Generate a CIFAR-10-sized image and compare a pixel slice to a reference."""
        _a : Union[str, Any] = """google/ddpm-cifar10-32"""
        _a : Union[str, Any] = UNetaDModel.from_pretrained(lowerCamelCase_ )
        _a : Dict = DDIMScheduler()
        _a : Union[str, Any] = DDIMPipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
        ddim.to(lowerCamelCase_ )
        ddim.set_progress_bar_config(disable=lowerCamelCase_ )
        # NOTE(review): `ddim`/`image` etc. are read but only ever assigned to the
        # mangled local `_a` — confirm the intended bindings before running.
        _a : List[Any] = torch.manual_seed(0 )
        _a : Union[str, Any] = ddim(generator=lowerCamelCase_ , eta=0.0 , output_type='numpy' ).images
        _a : str = image[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        _a : Any = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def __UpperCamelCase ( self ) -> List[Any]:
        """Generate a 256x256 bedroom image and compare a pixel slice to a reference."""
        _a : int = """google/ddpm-ema-bedroom-256"""
        _a : int = UNetaDModel.from_pretrained(lowerCamelCase_ )
        _a : Tuple = DDIMScheduler.from_pretrained(lowerCamelCase_ )
        _a : Union[str, Any] = DDIMPipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
        ddpm.to(lowerCamelCase_ )
        ddpm.set_progress_bar_config(disable=lowerCamelCase_ )
        _a : str = torch.manual_seed(0 )
        _a : List[str] = ddpm(generator=lowerCamelCase_ , output_type='numpy' ).images
        _a : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_5_6, 2_5_6, 3)
        _a : Union[str, Any] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 713
|
'''simple docstring'''
from typing import Any
class Node:
    """A single element of a singly linked list.

    Fixes: the constructor previously assigned `data` and `None` to a throwaway
    local (`_a`) instead of `self.data` / `self.next`, so `__repr__` and any list
    traversal raised AttributeError. The class is renamed from the mangled `a`
    to `Node` to match its use as `Node(...)` inside the linked-list class below
    (the old name `a` was immediately shadowed by that class, so no caller could
    reference it).
    """

    def __init__( self , lowerCamelCase_ ) -> None:
        self.data = lowerCamelCase_  # payload stored in this node
        self.next = None  # reference to the following node, or None at the tail

    def __repr__( self ) -> str:
        return F'''Node({self.data})'''
class a:
    """Singly linked list supporting indexing, insertion, deletion, and reversal.

    Fixes applied to the mangled original:
    - every state mutation (`self.head = ...`, `node.next = ...`, `current.data = ...`)
      had been replaced by assignments to a throwaway local `_a`, so no operation
      actually modified the list;
    - all methods shared the single mangled name `__UpperCamelCase`, so only the
      last definition survived — names are restored to match the calls made by
      the test function later in this file (`is_empty`, `delete_head`,
      `delete_tail`) and by this class itself (`insert_nth`, `delete_nth`);
      the remaining names (`insert_head`, `insert_tail`, `print_list`, `reverse`)
      are inferred from their bodies;
    - `__repr__` stringified the undefined name `lowerCamelCase_` instead of the
      iterated item.
    """

    def __init__( self ) -> None:
        self.head = None  # first node, or None when the list is empty

    def __iter__( self ):
        """Yield each node's data from head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__( self ) -> int:
        return sum(1 for _ in self )

    def __repr__( self ) -> str:
        return "->".join([str(item ) for item in self] )

    def __getitem__( self , index ):
        """Return the data stored at position `index` (0-based)."""
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None

    def __setitem__( self , index , data ) -> None:
        """Overwrite the data stored at position `index`."""
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data

    def insert_tail( self , data ) -> None:
        self.insert_nth(len(self ) , data )

    def insert_head( self , data ) -> None:
        self.insert_nth(0 , data )

    def insert_nth( self , index , data ) -> None:
        """Insert `data` so that it becomes element number `index`."""
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list( self ) -> None:  # print every node data
        print(self )

    def delete_head( self ):
        return self.delete_nth(0 )

    def delete_tail( self ):  # delete from tail
        return self.delete_nth(len(self ) - 1 )

    def delete_nth( self , index = 0 ):
        """Remove and return the data at position `index` (default: the head)."""
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError('List index out of range.' )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty( self ) -> bool:
        return self.head is None

    def reverse( self ) -> None:
        """Reverse the list in place by re-pointing every `next` link."""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def UpperCAmelCase_ ( ):
    """Self-test of the basic linked-list operations with integer payloads.

    Bug fix: the mangled version referenced the undefined name ``A`` everywhere
    the list (or the loop index inside the joins) was meant — restored, grounded
    on the surrounding asserts.

    NOTE(review): ``LinkedList`` is not defined under that name in this module
    (the class above is named ``a``) — presumably the original class name.
    """
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def UpperCAmelCase_ ( ):
    """Self-test with heterogeneous payloads (ints, strings, floats, None, nodes).

    Bug fix: loop/result variables had been collapsed onto placeholder names and
    the inserted values replaced by the undefined ``A`` — restored, grounded on
    the expected string representations asserted below.

    NOTE(review): ``LinkedList`` and ``Node`` are not defined under those names
    in this mangled module (see the list class docstring above).
    """
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def UpperCAmelCase_ ( ):
    """Interactive driver: run doctests, then demonstrate the list operations
    on user-supplied values.

    Bug fix: the object built and printed here was referenced through the
    undefined name ``A``, and the ``linked_list[1] = ...`` assignment had been
    collapsed to a dead local — restored, grounded on the surrounding prompts.
    """
    from doctest import testmod

    testmod()

    linked_list = LinkedList()  # NOTE(review): name undefined in this mangled module
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f'''Element at Position 1: {linked_list[1]}''')
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f'''length of linked_list is : {len(linked_list)}''')
if __name__ == "__main__":
    # Bug fix: `main` is not defined in this module; the interactive driver
    # above is bound to `UpperCAmelCase_` (the last definition of that name).
    UpperCAmelCase_()
| 424
| 0
|
'''simple docstring'''
from collections import deque
class lowercase_ :
    """A process record for the multilevel-feedback-queue scheduler below.

    Bug fixes: the mangled ``__init__`` declared the same placeholder parameter
    three times (a SyntaxError) and dropped the ``self.`` prefixes, so no
    attribute was ever set. Names restored from the attribute reads in the
    scheduler class (``process_name``, ``arrival_time``, ``stop_time``,
    ``burst_time``, ``waiting_time``, ``turnaround_time``).
    """

    def __init__( self , process_name: str , arrival_time: int , burst_time: int ) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class lowercase_ :
    """Multilevel feedback queue (MLFQ) scheduler.

    The first ``number_of_queues - 1`` levels run round-robin with their
    respective time slice; the last level runs first-come-first-served on
    whatever remains.

    Bug fixes (grounded on in-file call sites and attribute reads): the mangled
    ``__init__`` declared duplicate placeholder parameters (a SyntaxError) and
    dropped ``self.`` prefixes; every method had been collapsed onto the single
    name ``__UpperCAmelCase`` so only the last survived. Restored names come
    from the calls ``self.round_robin``, ``self.first_come_first_served``,
    ``self.update_waiting_time``, ``mlfq.multi_level_feedback_queue()`` and
    ``MLFQ.calculate_*`` in the ``__main__`` block below. The name of the
    burst-time getter has no call site here — presumably
    ``calculate_remaining_burst_time_of_processes``; confirm against original.
    """

    def __init__( self , number_of_queues , time_slices , queue , current_time ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue( self ):
        """Return the names of finished processes, in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time( self , queue ):
        """Return the accumulated ready-queue waiting time of each process."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time( self , queue ):
        """Return the arrival-to-completion time of each process."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time( self , queue ):
        """Return the completion (stop) time of each process."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes( self , queue ):
        return [q.burst_time for q in queue]

    def update_waiting_time( self , process ) -> int:
        """Add the time spent in the ready queue since the last stop."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served( self , ready_queue ):
        """Run every remaining process to completion, FCFS order."""
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin( self , ready_queue , time_slice ):
        """Run one round-robin cycle; unfinished processes re-enter the queue."""
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue( self ):
        """Run all levels and return the deque of finished processes."""
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
    import doctest

    # NOTE(review): this driver is mangled beyond repair without renaming the
    # classes above: all four process objects are assigned to the single name
    # `_UpperCAmelCase`, and `Process`, `MLFQ` and `Pa` are undefined in this
    # module (both classes above are named `lowercase_`). Presumably the
    # original bound P1..P4 = Process(...) and used deque([P1, P2, P3, P4]) —
    # TODO confirm against the original multilevel_feedback_queue module.
    _UpperCAmelCase : List[Any] = Process('''P1''', 0, 53)
    _UpperCAmelCase : Any = Process('''P2''', 0, 17)
    _UpperCAmelCase : Any = Process('''P3''', 0, 68)
    _UpperCAmelCase : Union[str, Any] = Process('''P4''', 0, 24)
    _UpperCAmelCase : Optional[Any] = 3
    _UpperCAmelCase : Dict = [17, 25]
    _UpperCAmelCase : Dict = deque([Pa, Pa, Pa, Pa])
    # There must be exactly one time slice per round-robin level.
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
    _UpperCAmelCase : Optional[int] = Process('''P1''', 0, 53)
    _UpperCAmelCase : Tuple = Process('''P2''', 0, 17)
    _UpperCAmelCase : Tuple = Process('''P3''', 0, 68)
    _UpperCAmelCase : str = Process('''P4''', 0, 24)
    _UpperCAmelCase : Union[str, Any] = 3
    _UpperCAmelCase : int = [17, 25]
    _UpperCAmelCase : Optional[int] = deque([Pa, Pa, Pa, Pa])
    _UpperCAmelCase : int = MLFQ(number_of_queues, time_slices, queue, 0)
    _UpperCAmelCase : Tuple = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
    )
    # print sequence of finished processes
    print(
        F'''sequence of finished processes:\
    {mlfq.calculate_sequence_of_finish_queue()}'''
    )
| 107
|
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# Environment dump: print interpreter, OS, torch and transformers information.
# Bug fix: the first line carried the annotation `Optional[Any]` with no typing
# import in this script, which raises NameError at import time (module-level
# variable annotations are evaluated); the annotation is dropped.
# NOTE(review): the value "3" is otherwise unused — presumably the original
# line was os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3". TODO confirm.
lowercase = "3"

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
| 495
| 0
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_snake_case : Any = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_snake_case : int = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _A ( __snake_case :List[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=__snake_case )[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _A ( f ):
    """Extract MNIST images from gzipped file object *f* into a
    [num_images, rows, cols, 1] uint8 array.

    Bug fixes: the parameter was a placeholder while the body read the
    undefined name ``f``; pixel dtype was the non-existent ``numpy.uinta``
    (one byte per pixel, so ``numpy.uint8``); the decorator's first argument
    was an undefined name (``deprecated(date, instructions)`` accepts None).

    Raises:
        ValueError: if the file's magic number is not 2051.
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        # NOTE(review): `_readaa` is undefined in this mangled module —
        # presumably the 32-bit big-endian reader defined above. TODO confirm.
        magic = _readaa(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _readaa(bytestream)
        rows = _readaa(bytestream)
        cols = _readaa(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _A ( labels_dense , num_classes ):
    """Convert class labels from scalars to one-hot vectors.

    Bug fixes: both parameters were declared with the same placeholder name
    (a SyntaxError) while the body read ``labels_dense``/``num_classes``; and
    the line that sets the hot entries had lost its left-hand side (it
    assigned ``1`` to a dead local) — the ``.flat`` scatter is restored.
    The decorator's first (date) argument was an undefined name; None is valid.
    """
    num_labels = labels_dense.shape[0]
    # Offset of each row's start within the flattened one-hot matrix.
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _A ( f , one_hot=False , num_classes=10 ):
    """Extract MNIST labels from gzipped file object *f* into a uint8 vector
    (or a one-hot matrix when *one_hot* is true).

    Bug fixes: the three parameters were declared with the same placeholder
    name (a SyntaxError); names restored from the body's reads (``f.name``,
    ``one_hot``) and the canonical signature (defaults False and 10).
    Label dtype ``numpy.uinta`` corrected to ``numpy.uint8``; the decorator's
    undefined first argument replaced by None.

    Raises:
        ValueError: if the file's magic number is not 2049.
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        # NOTE(review): `_readaa` and `_dense_to_one_hot` are undefined in this
        # mangled module — presumably the reader/converter defined above.
        magic = _readaa(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _readaa(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class __SCREAMING_SNAKE_CASE :
    """In-memory MNIST dataset with epoch-aware, optionally shuffled batching.

    NOTE(review): this class is heavily mangled and not runnable as-is — the
    decorator argument `_a` is undefined at class scope, `__init__` declares the
    same placeholder parameter `_a` repeatedly (a SyntaxError), every local and
    attribute is bound to `__SCREAMING_SNAKE_CASE` (the `self._images`/
    `self._labels`/`self._num_examples`/`self._epochs_completed`/
    `self._index_in_epoch` assignments were lost), all four properties share one
    name so only the last survives, and `dtypes.floataa`/`dtypes.uinta`/
    `numpy.floataa`/`seeda`/`perma` are presumably float32/uint8/seed1-seed2/
    perm0 from the original TF mnist module — TODO confirm before restoring.
    """
    @deprecated(
        _a, "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.", )
    def __init__( self, _a, _a, _a=False, _a=False, _a=dtypes.floataa, _a=True, _a=None, ) -> Tuple:
        # Derive per-instance RNG seeds from the graph-level seed.
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = random_seed.get_seed(_a )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seeda )
        __SCREAMING_SNAKE_CASE = dtypes.as_dtype(_a ).base_dtype
        if dtype not in (dtypes.uinta, dtypes.floataa):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
        if fake_data:
            __SCREAMING_SNAKE_CASE = 1_00_00
            __SCREAMING_SNAKE_CASE = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
            __SCREAMING_SNAKE_CASE = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                __SCREAMING_SNAKE_CASE = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2] )
            if dtype == dtypes.floataa:
                # Convert from [0, 255] -> [0.0, 1.0].
                __SCREAMING_SNAKE_CASE = images.astype(numpy.floataa )
                __SCREAMING_SNAKE_CASE = numpy.multiply(_a, 1.0 / 255.0 )
        __SCREAMING_SNAKE_CASE = images
        __SCREAMING_SNAKE_CASE = labels
        __SCREAMING_SNAKE_CASE = 0
        __SCREAMING_SNAKE_CASE = 0

    # NOTE(review): these four properties were presumably images/labels/
    # num_examples/epochs_completed; all collapsed onto one name here.
    @property
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        return self._images
    @property
    def __lowerCAmelCase ( self ) -> List[str]:
        return self._labels
    @property
    def __lowerCAmelCase ( self ) -> Any:
        return self._num_examples
    @property
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        return self._epochs_completed

    def __lowerCAmelCase ( self, _a, _a=False, _a=True ) -> Tuple:
        """Return the next batch (images, labels); presumably the original
        `next_batch(batch_size, fake_data, shuffle)` — TODO confirm."""
        if fake_data:
            __SCREAMING_SNAKE_CASE = [1] * 7_84
            __SCREAMING_SNAKE_CASE = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(_a )],
                [fake_label for _ in range(_a )],
            )
        __SCREAMING_SNAKE_CASE = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            __SCREAMING_SNAKE_CASE = numpy.arange(self._num_examples )
            numpy.random.shuffle(_a )
            __SCREAMING_SNAKE_CASE = self.images[perma]
            __SCREAMING_SNAKE_CASE = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            __SCREAMING_SNAKE_CASE = self._num_examples - start
            __SCREAMING_SNAKE_CASE = self._images[start : self._num_examples]
            __SCREAMING_SNAKE_CASE = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                __SCREAMING_SNAKE_CASE = numpy.arange(self._num_examples )
                numpy.random.shuffle(_a )
                __SCREAMING_SNAKE_CASE = self.images[perm]
                __SCREAMING_SNAKE_CASE = self.labels[perm]
            # Start next epoch
            __SCREAMING_SNAKE_CASE = 0
            __SCREAMING_SNAKE_CASE = batch_size - rest_num_examples
            __SCREAMING_SNAKE_CASE = self._index_in_epoch
            __SCREAMING_SNAKE_CASE = self._images[start:end]
            __SCREAMING_SNAKE_CASE = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            __SCREAMING_SNAKE_CASE = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _A ( filename , work_directory , source_url ):
    """Download *filename* from *source_url* into *work_directory* unless it is
    already present, and return the local file path.

    Bug fixes: the three parameters were declared with the same placeholder
    name (a SyntaxError); names restored from the canonical call pattern
    (join(work_directory, filename); urlretrieve(source_url, filepath)).
    The decorator's undefined first argument replaced by None.
    """
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(
    __snake_case , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def _A ( __snake_case :List[Any] , __snake_case :Optional[Any]=False , __snake_case :Optional[int]=False , __snake_case :List[str]=dtypes.floataa , __snake_case :Any=True , __snake_case :Optional[Any]=5000 , __snake_case :int=None , __snake_case :Dict=DEFAULT_SOURCE_URL , ) -> Optional[int]:
    """Download (if needed) and load the MNIST train/validation/test splits.

    NOTE(review): mangled and not runnable as-is — every parameter shares the
    placeholder name `__snake_case` (a SyntaxError) while the body reads
    `fake_data`/`one_hot`/`dtype`/`reshape`/`validation_size`/`seed`/
    `source_url`, all locals are collapsed onto `__SCREAMING_SNAKE_CASE`, and
    `_maybe_download`/`_extract_images`/`_extract_labels`/`_DataSet`/`_Datasets`
    are the presumed original helper names (the helpers above were renamed
    `_A`). TODO confirm against the original TF mnist module before restoring.
    """
    if fake_data:

        def fake():
            # Empty dataset with fake_data=True stands in for every split.
            return _DataSet(
                [] , [] , fake_data=__snake_case , one_hot=__snake_case , dtype=__snake_case , seed=__snake_case )

        __SCREAMING_SNAKE_CASE = fake()
        __SCREAMING_SNAKE_CASE = fake()
        __SCREAMING_SNAKE_CASE = fake()
        return _Datasets(train=__snake_case , validation=__snake_case , test=__snake_case )
    if not source_url: # empty string check
        __SCREAMING_SNAKE_CASE = DEFAULT_SOURCE_URL
    __SCREAMING_SNAKE_CASE = "train-images-idx3-ubyte.gz"
    __SCREAMING_SNAKE_CASE = "train-labels-idx1-ubyte.gz"
    __SCREAMING_SNAKE_CASE = "t10k-images-idx3-ubyte.gz"
    __SCREAMING_SNAKE_CASE = "t10k-labels-idx1-ubyte.gz"
    # Download and parse each of the four archive files.
    __SCREAMING_SNAKE_CASE = _maybe_download(
        __snake_case , __snake_case , source_url + train_images_file )
    with gfile.Open(__snake_case , "rb" ) as f:
        __SCREAMING_SNAKE_CASE = _extract_images(__snake_case )
    __SCREAMING_SNAKE_CASE = _maybe_download(
        __snake_case , __snake_case , source_url + train_labels_file )
    with gfile.Open(__snake_case , "rb" ) as f:
        __SCREAMING_SNAKE_CASE = _extract_labels(__snake_case , one_hot=__snake_case )
    __SCREAMING_SNAKE_CASE = _maybe_download(
        __snake_case , __snake_case , source_url + test_images_file )
    with gfile.Open(__snake_case , "rb" ) as f:
        __SCREAMING_SNAKE_CASE = _extract_images(__snake_case )
    __SCREAMING_SNAKE_CASE = _maybe_download(
        __snake_case , __snake_case , source_url + test_labels_file )
    with gfile.Open(__snake_case , "rb" ) as f:
        __SCREAMING_SNAKE_CASE = _extract_labels(__snake_case , one_hot=__snake_case )
    # Carve the validation split off the front of the training data.
    if not 0 <= validation_size <= len(__snake_case ):
        __SCREAMING_SNAKE_CASE = (
            "Validation size should be between 0 and "
            f'''{len(__snake_case )}. Received: {validation_size}.'''
        )
        raise ValueError(__snake_case )
    __SCREAMING_SNAKE_CASE = train_images[:validation_size]
    __SCREAMING_SNAKE_CASE = train_labels[:validation_size]
    __SCREAMING_SNAKE_CASE = train_images[validation_size:]
    __SCREAMING_SNAKE_CASE = train_labels[validation_size:]
    __SCREAMING_SNAKE_CASE = {"dtype": dtype, "reshape": reshape, "seed": seed}
    __SCREAMING_SNAKE_CASE = _DataSet(__snake_case , __snake_case , **__snake_case )
    __SCREAMING_SNAKE_CASE = _DataSet(__snake_case , __snake_case , **__snake_case )
    __SCREAMING_SNAKE_CASE = _DataSet(__snake_case , __snake_case , **__snake_case )
    return _Datasets(train=__snake_case , validation=__snake_case , test=__snake_case )
| 214
|
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _A ( ) -> Optional[int]:
    """Exercise patch_submodule on os.path.join: the mock must be visible
    through every alias (os.path.join, path.join, join, renamed_*), sibling
    attributes must stay untouched, and everything must be restored when the
    context exits.

    Bug fix: the mock string was assigned to one placeholder name but passed
    through the undefined name ``__snake_case`` while the asserts read ``mock``
    — it is now consistently bound to ``mock`` (grounded by the asserts).
    """
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched
        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock
        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock
        # check join
        assert _test_patching.join is mock
        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname
        # Even renamed modules or objects must be patched
        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock
        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock
        # check renamed_join
        assert _test_patching.renamed_join is mock
        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everthing is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
def _A ( ) -> Any:
    """Patching a builtin name ("open") present in the target's globals.

    Bug fix: the mock was bound to a placeholder but passed as the undefined
    name ``__snake_case`` while the assert read ``mock`` — unified on ``mock``.
    """
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everthing is back to normal when the patch is over
    assert _test_patching.open is open
def _A ( ) -> str:
    """Patching an attribute of a module the target never imported must be a
    silent no-op rather than an error.

    Bug fix: the mock was passed as the undefined name ``__snake_case``.
    """
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def _A ( ) -> Tuple:
    """Patching a builtin ("len") that is NOT in the target module's globals.

    Bug fixes: the mock was passed as the undefined name ``__snake_case``; and
    the getattr default must be None for the ``is None`` assertion to hold.
    """
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def _A ( ) -> str:
    """Use patch_submodule via explicit start()/stop() instead of a with-block.

    Bug fix: both the mock string and the patcher object had been collapsed
    onto one placeholder name while the body read ``mock`` and ``patch`` —
    restored, grounded by the ``patch.start()``/``is mock`` lines.
    """
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)

    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def _A ( ) -> str:
    """Nested patches over three os attributes, in two nesting orders.

    Bug fix: the three mock strings had been collapsed onto one placeholder
    name while the asserts read ``mock_join``/``mock_dirname``/``mock_rename``
    — restored; each mock is matched to the patch target named in its string.
    """
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def _A ( ) -> List[str]:
    """Patching attributes/modules that do not exist anywhere must not raise.

    Bug fix: the mock was passed as the undefined name ``__snake_case``.
    """
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 214
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class __lowerCamelCase ( lowerCAmelCase ):
    """Output container for the temporal transformer model below.

    NOTE(review): the model returns ``TransformerTemporalModelOutput(sample=...)``
    — presumably this class with the field originally named ``sample``; both
    names were mangled here, so the keyword construction cannot resolve as
    written. TODO confirm against the original module.
    """

    a__: torch.FloatTensor  # the transformed sample batch
class __lowerCamelCase ( lowerCAmelCase , lowerCAmelCase ):
    """A transformer applied over the temporal (frame) axis of video latents:
    group-norm -> proj_in -> BasicTransformerBlocks -> proj_out -> residual add.

    NOTE(review): mangled and not runnable as-is — both base classes and every
    parameter share one placeholder name (duplicate parameters are a
    SyntaxError), the config values are never bound to `self`
    (`self.norm`/`self.proj_in`/`self.transformer_blocks`/`self.proj_out` reads
    have no matching assignments), and the forward pass reads
    `batch_frames`/`num_frames`/`batch_size`/`height`/`width`/`residual`/
    `output` whose bindings were collapsed onto `lowerCamelCase_`. TODO confirm
    against the original TransformerTemporalModel before restoring.
    """
    @register_to_config
    def __init__( self , UpperCAmelCase = 16 , UpperCAmelCase = 88 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = 32 , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = "geglu" , UpperCAmelCase = True , UpperCAmelCase = True , ):
        super().__init__()
        lowerCamelCase_ = num_attention_heads
        lowerCamelCase_ = attention_head_dim
        # inner_dim = heads * head_dim
        lowerCamelCase_ = num_attention_heads * attention_head_dim
        lowerCamelCase_ = in_channels
        lowerCamelCase_ = torch.nn.GroupNorm(num_groups=UpperCAmelCase , num_channels=UpperCAmelCase , eps=1e-6 , affine=UpperCAmelCase )
        lowerCamelCase_ = nn.Linear(UpperCAmelCase , UpperCAmelCase )
        # 3. Define transformers blocks
        lowerCamelCase_ = nn.ModuleList(
            [
                BasicTransformerBlock(
                    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , dropout=UpperCAmelCase , cross_attention_dim=UpperCAmelCase , activation_fn=UpperCAmelCase , attention_bias=UpperCAmelCase , double_self_attention=UpperCAmelCase , norm_elementwise_affine=UpperCAmelCase , )
                for d in range(UpperCAmelCase )
            ] )
        lowerCamelCase_ = nn.Linear(UpperCAmelCase , UpperCAmelCase )

    def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=1 , UpperCAmelCase=None , UpperCAmelCase = True , ):
        """Forward pass; presumably (hidden_states, encoder_hidden_states,
        timestep, class_labels, num_frames, cross_attention_kwargs,
        return_dict) — TODO confirm."""
        # Reshape (batch*frames, C, H, W) so attention runs across frames.
        lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = hidden_states.shape
        lowerCamelCase_ = batch_frames // num_frames
        lowerCamelCase_ = hidden_states
        lowerCamelCase_ = hidden_states[None, :].reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        lowerCamelCase_ = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
        lowerCamelCase_ = self.norm(UpperCAmelCase )
        lowerCamelCase_ = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCAmelCase , UpperCAmelCase )
        lowerCamelCase_ = self.proj_in(UpperCAmelCase )
        # 2. Blocks
        for block in self.transformer_blocks:
            lowerCamelCase_ = block(
                UpperCAmelCase , encoder_hidden_states=UpperCAmelCase , timestep=UpperCAmelCase , cross_attention_kwargs=UpperCAmelCase , class_labels=UpperCAmelCase , )
        # 3. Output
        lowerCamelCase_ = self.proj_out(UpperCAmelCase )
        lowerCamelCase_ = (
            hidden_states[None, None, :]
            .reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        lowerCamelCase_ = hidden_states.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        # Residual connection back onto the input.
        lowerCamelCase_ = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=UpperCAmelCase )
| 29
|
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class A ( nn.Module ):
    """Flax block: 2x nearest-neighbor upsample followed by a 3x3 conv.

    NOTE(review): mangled — both dataclass fields share the name
    `lowerCamelCase` (so only the second survives; presumably `out_channels`
    and `dtype` originally), `jnp.floataa` is presumably `jnp.float32`, and in
    `A__` (presumably flax's `setup`) the conv is bound to a local instead of
    `self.conv`, which `__call__` reads. Likewise the shape unpack binds four
    locals to one name while `batch`/`height`/`width`/`channels` are read.
    TODO confirm against the original FlaxUpsample2D.
    """
    lowerCamelCase : int
    lowerCamelCase : jnp.dtype = jnp.floataa

    def A__ ( self ) -> List[Any]:
        """Build the 3x3 stride-1 conv (padding 1 on each side)."""
        lowercase__ = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

    def __call__( self , lowerCamelCase__ ) -> List[str]:
        """Double spatial resolution (nearest), then convolve."""
        lowercase__ , lowercase__ , lowercase__ , lowercase__ = hidden_states.shape
        lowercase__ = jax.image.resize(
            lowerCamelCase__ , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , )
        lowercase__ = self.conv(lowerCamelCase__ )
        return hidden_states
class A ( nn.Module ):
    """Flax downsampling block: a single 3x3 convolution with stride 2."""

    # Output channels of the strided convolution (obfuscated field name;
    # accessed as ``self.out_channels`` -- TODO confirm).
    lowerCamelCase : int
    # Computation dtype (``jnp.floataa`` is an obfuscated token, presumably float32).
    lowerCamelCase : jnp.dtype = jnp.floataa

    def A__ ( self ) -> Dict:
        """Build the 3x3 convolution; stride (2, 2) halves H and W."""
        lowercase__ = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

    def __call__( self , lowerCamelCase__ ) -> Optional[Any]:
        """Apply the strided convolution to NHWC ``hidden_states``.

        NOTE(review): the assignment binds ``lowercase__`` but the return reads
        ``hidden_states`` -- distinct names before obfuscation.
        """
        lowercase__ = self.conv(lowerCamelCase__ )
        return hidden_states
class A ( nn.Module ):
    """Flax ResNet block: two GroupNorm+swish+Conv stages with a time-embedding
    injection and an optional 1x1 shortcut convolution."""

    # Obfuscated dataclass fields; judging by the reads below these were
    # in_channels, out_channels, dropout_prob, use_nin_shortcut and dtype.
    lowerCamelCase : int
    lowerCamelCase : int = None
    lowerCamelCase : float = 0.0
    lowerCamelCase : bool = None
    lowerCamelCase : jnp.dtype = jnp.floataa

    def A__ ( self ) -> str:
        """Build the block's sub-modules.

        NOTE(review): every assignment here binds the same obfuscated name
        ``lowercase__``; in the original each line set a distinct attribute
        (norm1, conv1, time_emb_proj, norm2, dropout, conv2, conv_shortcut).
        """
        # Fall back to the input width when no explicit output width is given.
        lowercase__ = self.in_channels if self.out_channels is None else self.out_channels
        lowercase__ = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        lowercase__ = nn.Conv(
            lowerCamelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # Projects the time embedding to the channel dimension.
        lowercase__ = nn.Dense(lowerCamelCase__ , dtype=self.dtype )
        lowercase__ = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        lowercase__ = nn.Dropout(self.dropout_prob )
        lowercase__ = nn.Conv(
            lowerCamelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # A 1x1 shortcut conv is needed whenever the channel count changes,
        # unless the caller forces the choice via use_nin_shortcut.
        lowercase__ = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        lowercase__ = None
        if use_nin_shortcut:
            lowercase__ = nn.Conv(
                lowerCamelCase__ , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , )

    def __call__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True ) -> List[str]:
        """Run the residual block on (hidden_states, temb).

        NOTE(review): obfuscation collapsed distinct locals into ``lowercase__``
        and distinct modules into ``self.norma``/``self.conva``; the control
        flow below matches the standard norm-swish-conv / add-temb /
        norm-swish-dropout-conv / shortcut-add residual pattern.
        """
        lowercase__ = hidden_states
        lowercase__ = self.norma(lowerCamelCase__ )
        lowercase__ = nn.swish(lowerCamelCase__ )
        lowercase__ = self.conva(lowerCamelCase__ )
        # Inject the (swish-activated) time embedding, broadcast over H and W.
        lowercase__ = self.time_emb_proj(nn.swish(lowerCamelCase__ ) )
        lowercase__ = jnp.expand_dims(jnp.expand_dims(lowerCamelCase__ , 1 ) , 1 )
        lowercase__ = hidden_states + temb
        lowercase__ = self.norma(lowerCamelCase__ )
        lowercase__ = nn.swish(lowerCamelCase__ )
        lowercase__ = self.dropout(lowerCamelCase__ , lowerCamelCase__ )
        lowercase__ = self.conva(lowerCamelCase__ )
        # Align the residual's channel count before the final addition.
        if self.conv_shortcut is not None:
            lowercase__ = self.conv_shortcut(lowerCamelCase__ )
        return hidden_states + residual
| 325
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : int = 10 ) -> str:
    """Return the last ``lowerCAmelCase_`` digits of the non-Mersenne prime
    28433 * 2**7830457 + 1 (Project Euler problem 97).

    Args:
        lowerCAmelCase_: number of trailing decimal digits to return (default 10).

    Returns:
        The trailing digits as a decimal string.

    Raises:
        ValueError: if the input is not a non-negative integer.
    """
    # NOTE(review): the original body referenced undefined names (``__A``,
    # ``n``, ``number``, ``modulus``); rewritten to use the actual parameter.
    if not isinstance(lowerCAmelCase_ , int ) or lowerCAmelCase_ < 0:
        raise ValueError('Invalid input' )
    modulus = 10**lowerCAmelCase_
    # Three-argument pow() does fast modular exponentiation, so the huge
    # power 2**7830457 is never materialised in full.
    number = 2_8433 * (pow(2 , 783_0457 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # NOTE(review): the original printed ``solution(10)``, a name that is not
    # defined in this file; call the function actually defined above instead.
    print(f"""{SCREAMING_SNAKE_CASE__(10) = }""")
| 708
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' , __UpperCAmelCase , )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
| 153
| 0
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class _lowerCAmelCase:
    """Test helper that builds tiny XGLM configurations and dummy TF inputs.

    NOTE(review): obfuscation collapsed every ``__init__`` parameter into the
    single name ``_lowerCamelCase`` and every local into ``UpperCamelCase_``;
    the attribute reads below show the intended order (parent, batch_size,
    seq_length, ...).
    """

    a : Optional[int] =XGLMConfig
    a : str ={}
    a : Any ='''gelu'''

    def __init__( self , _lowerCamelCase , _lowerCamelCase=1_4 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=9_9 , _lowerCamelCase=3_2 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=3_7 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=5_1_2 , _lowerCamelCase=0.0_2 , ):
        # Store the test-harness configuration on the instance.
        UpperCamelCase_: Dict = parent
        UpperCamelCase_: str = batch_size
        UpperCamelCase_: List[str] = seq_length
        UpperCamelCase_: Tuple = is_training
        UpperCamelCase_: Optional[Any] = use_input_mask
        UpperCamelCase_: Any = use_labels
        UpperCamelCase_: List[Any] = vocab_size
        UpperCamelCase_: Optional[Any] = d_model
        UpperCamelCase_: List[str] = num_hidden_layers
        UpperCamelCase_: List[str] = num_attention_heads
        UpperCamelCase_: List[str] = ffn_dim
        UpperCamelCase_: str = activation_function
        UpperCamelCase_: Union[str, Any] = activation_dropout
        UpperCamelCase_: str = attention_dropout
        UpperCamelCase_: str = max_position_embeddings
        UpperCamelCase_: List[str] = initializer_range
        # Fixed special-token ids for the tiny test config.
        UpperCamelCase_: str = None
        UpperCamelCase_: List[Any] = 0
        UpperCamelCase_: str = 2
        UpperCamelCase_: Tuple = 1

    def _a ( self ):
        # Fetch the published reference config for the large-config test.
        return XGLMConfig.from_pretrained('facebook/xglm-564M' )

    def _a ( self ):
        """Build (config, input_ids, input_mask, head_mask) for a forward pass."""
        # Clip token ids into [0, 3] so they are valid for the tiny vocab.
        UpperCamelCase_: List[Any] = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        UpperCamelCase_: Dict = None
        if self.use_input_mask:
            UpperCamelCase_: Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCamelCase_: Optional[int] = self.get_config()
        UpperCamelCase_: Union[str, Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def _a ( self ):
        # Assemble the tiny test config from the stored hyper-parameters.
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=_lowerCamelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=_lowerCamelCase , )

    def _a ( self ):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        UpperCamelCase_: int = self.prepare_config_and_inputs()
        (
            (
                UpperCamelCase_
            ) ,(
                UpperCamelCase_
            ) ,(
                UpperCamelCase_
            ) ,(
                UpperCamelCase_
            ) ,
        ): List[str] = config_and_inputs
        UpperCamelCase_: Any = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class _lowerCAmelCase( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Common TF model-test suite for XGLM (base model and causal-LM head)."""

    # Model classes / pipeline mapping are empty when TF is unavailable.
    a : str =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    a : int =(TFXGLMForCausalLM,) if is_tf_available() else ()
    a : str =(
        {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    a : Optional[int] =False
    a : Any =False
    a : Optional[int] =False

    def _a ( self ):
        # Build the shared model tester and a ConfigTester for XGLMConfig.
        UpperCamelCase_: Dict = TFXGLMModelTester(self )
        UpperCamelCase_: Any = ConfigTester(self , config_class=_lowerCamelCase , n_embd=3_7 )

    def _a ( self ):
        # Run the generic config sanity checks.
        self.config_tester.run_common_tests()

    @slow
    def _a ( self ):
        # Smoke-test that the first published checkpoint loads.
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase_: Union[str, Any] = TFXGLMModel.from_pretrained(_lowerCamelCase )
            self.assertIsNotNone(_lowerCamelCase )

    @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
    def _a ( self ):
        super().test_resize_token_embeddings()
@require_tf
class _lowerCAmelCase( unittest.TestCase ):
    """Slow integration tests for TF XGLM generation against the 564M checkpoint.

    NOTE(review): obfuscation collapsed distinct locals into ``UpperCamelCase_``
    and most keyword values into ``_lowerCamelCase``; the expected-output
    literals are preserved verbatim.
    """

    @slow
    def _a ( self , _lowerCamelCase=True ):
        # Greedy generation from "The dog" must reproduce the recorded ids.
        UpperCamelCase_: Optional[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        UpperCamelCase_: int = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] , dtype=tf.intaa ) # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        UpperCamelCase_: List[Any] = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
        # fmt: on
        UpperCamelCase_: int = model.generate(_lowerCamelCase , do_sample=_lowerCamelCase , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , _lowerCamelCase )

    @slow
    def _a ( self ):
        # Seeded sampling on CPU must be reproducible.
        UpperCamelCase_: str = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
        UpperCamelCase_: int = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        tf.random.set_seed(0 )
        UpperCamelCase_: str = tokenizer('Today is a nice day and' , return_tensors='tf' )
        UpperCamelCase_: int = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0' ):
            UpperCamelCase_: str = model.generate(_lowerCamelCase , do_sample=_lowerCamelCase , seed=[7, 0] )
        UpperCamelCase_: Tuple = tokenizer.decode(output_ids[0] , skip_special_tokens=_lowerCamelCase )
        UpperCamelCase_: Dict = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(_lowerCamelCase , _lowerCamelCase )

    @slow
    def _a ( self ):
        # Left-padded batched generation must match per-sentence generation.
        UpperCamelCase_: List[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        UpperCamelCase_: List[Any] = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
        UpperCamelCase_: Optional[int] = 'left'
        # use different length sentences to test batching
        UpperCamelCase_: str = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        UpperCamelCase_: List[str] = tokenizer(_lowerCamelCase , return_tensors='tf' , padding=_lowerCamelCase )
        UpperCamelCase_: Optional[Any] = inputs['input_ids']
        UpperCamelCase_: Optional[Any] = model.generate(input_ids=_lowerCamelCase , attention_mask=inputs['attention_mask'] , max_new_tokens=1_2 )
        # Re-generate each sentence individually for comparison.
        UpperCamelCase_: Dict = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
        UpperCamelCase_: int = model.generate(input_ids=_lowerCamelCase , max_new_tokens=1_2 )
        UpperCamelCase_: Tuple = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
        UpperCamelCase_: str = model.generate(input_ids=_lowerCamelCase , max_new_tokens=1_2 )
        UpperCamelCase_: Tuple = tokenizer.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
        UpperCamelCase_: Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_lowerCamelCase )
        UpperCamelCase_: int = tokenizer.decode(output_padded[0] , skip_special_tokens=_lowerCamelCase )
        UpperCamelCase_: Dict = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
        self.assertListEqual(_lowerCamelCase , [non_padded_sentence, padded_sentence] )
| 57
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __UpperCAmelCase ( snake_case__ ):
    """Processor for SAM: wraps a ``SamImageProcessor`` and normalises optional
    point / label / box prompts into model-ready arrays or tensors.

    NOTE(review): obfuscation collapsed distinct locals into ``UpperCamelCase``
    and most arguments into ``SCREAMING_SNAKE_CASE``; comments below describe
    the visible data flow, not the original identifiers.
    """

    lowercase = ["""image_processor"""]
    lowercase = """SamImageProcessor"""

    def __init__( self , SCREAMING_SNAKE_CASE ) -> int:
        """Store the image processor, the pad value for ragged points, and the
        target size (the image processor's configured longest edge)."""
        super().__init__(SCREAMING_SNAKE_CASE )
        UpperCamelCase = self.image_processor
        # Sentinel appended when point lists of a batch have unequal lengths.
        UpperCamelCase = -10
        UpperCamelCase = self.image_processor.size["longest_edge"]

    def __call__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> BatchEncoding:
        """Preprocess images, then validate and rescale the prompt inputs to the
        processed image coordinate system."""
        UpperCamelCase = self.image_processor(
            SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
        # pop arguments that are not used in the foward but used nevertheless
        UpperCamelCase = encoding_image_processor["original_sizes"]
        if hasattr(SCREAMING_SNAKE_CASE , "numpy" ):  # Checks if Torch or TF tensor
            UpperCamelCase = original_sizes.numpy()
        UpperCamelCase , UpperCamelCase , UpperCamelCase = self._check_and_preprocess_points(
            input_points=SCREAMING_SNAKE_CASE , input_labels=SCREAMING_SNAKE_CASE , input_boxes=SCREAMING_SNAKE_CASE , )
        UpperCamelCase = self._normalize_and_convert(
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , input_points=SCREAMING_SNAKE_CASE , input_labels=SCREAMING_SNAKE_CASE , input_boxes=SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , )
        return encoding_image_processor

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="pt" , ) -> int:
        """Rescale points/boxes to the processed image size, pad ragged point
        lists, convert to the requested tensor framework, and attach everything
        to the encoding."""
        if input_points is not None:
            # If there is one original size for many point lists, reuse it;
            # otherwise pair each point list with its own original size.
            if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ):
                UpperCamelCase = [
                    self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE , original_sizes[0] ) for point in input_points
                ]
            else:
                UpperCamelCase = [
                    self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                    for point, original_size in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    UpperCamelCase , UpperCamelCase = self._pad_points_and_labels(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            UpperCamelCase = np.array(SCREAMING_SNAKE_CASE )
        if input_labels is not None:
            UpperCamelCase = np.array(SCREAMING_SNAKE_CASE )
        if input_boxes is not None:
            # Same one-size-vs-per-item handling as for points, in box mode.
            if len(SCREAMING_SNAKE_CASE ) != len(SCREAMING_SNAKE_CASE ):
                UpperCamelCase = [
                    self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE , original_sizes[0] , is_bounding_box=SCREAMING_SNAKE_CASE )
                    for box in input_boxes
                ]
            else:
                UpperCamelCase = [
                    self._normalize_coordinates(self.target_size , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , is_bounding_box=SCREAMING_SNAKE_CASE )
                    for box, original_size in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                ]
            UpperCamelCase = np.array(SCREAMING_SNAKE_CASE )
        if input_boxes is not None:
            if return_tensors == "pt":
                UpperCamelCase = torch.from_numpy(SCREAMING_SNAKE_CASE )
                # boxes batch size of 1 by default
                UpperCamelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                UpperCamelCase = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
                # boxes batch size of 1 by default
                UpperCamelCase = tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                UpperCamelCase = torch.from_numpy(SCREAMING_SNAKE_CASE )
                # point batch size of 1 by default
                UpperCamelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                UpperCamelCase = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
                # point batch size of 1 by default
                UpperCamelCase = tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                UpperCamelCase = torch.from_numpy(SCREAMING_SNAKE_CASE )
                # point batch size of 1 by default
                UpperCamelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                UpperCamelCase = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
                # point batch size of 1 by default
                UpperCamelCase = tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels} )
        return encoding_image_processor

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
        """Pad every point list (and its labels) up to the longest list in the
        batch using the sentinel pad value."""
        UpperCamelCase = max([point.shape[0] for point in input_points] )
        UpperCamelCase = []
        for i, point in enumerate(SCREAMING_SNAKE_CASE ):
            if point.shape[0] != expected_nb_points:
                # Append (pad_value, pad_value) rows and a matching pad label.
                UpperCamelCase = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                UpperCamelCase = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(SCREAMING_SNAKE_CASE )
        UpperCamelCase = processed_input_points
        return input_points, input_labels

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> np.ndarray:
        """Scale (x, y) coordinates from the original image size to the
        preprocessed size; boxes are reshaped to (N, 2, 2) during scaling."""
        UpperCamelCase , UpperCamelCase = original_size
        UpperCamelCase , UpperCamelCase = self.image_processor._get_preprocess_shape(SCREAMING_SNAKE_CASE , longest_edge=SCREAMING_SNAKE_CASE )
        UpperCamelCase = deepcopy(SCREAMING_SNAKE_CASE ).astype(SCREAMING_SNAKE_CASE )
        if is_bounding_box:
            UpperCamelCase = coords.reshape(-1 , 2 , 2 )
        UpperCamelCase = coords[..., 0] * (new_w / old_w)
        UpperCamelCase = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            UpperCamelCase = coords.reshape(-1 , 4 )
        return coords

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ) -> Any:
        """Validate prompt inputs (accepting framework tensors or nested lists)
        and convert them to lists of numpy arrays; boxes become float arrays."""
        if input_points is not None:
            if hasattr(SCREAMING_SNAKE_CASE , "numpy" ):  # Checks for TF or Torch tensor
                UpperCamelCase = input_points.numpy().tolist()
            if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not isinstance(input_points[0] , SCREAMING_SNAKE_CASE ):
                raise ValueError("Input points must be a list of list of floating points." )
            UpperCamelCase = [np.array(SCREAMING_SNAKE_CASE ) for input_point in input_points]
        else:
            UpperCamelCase = None
        if input_labels is not None:
            if hasattr(SCREAMING_SNAKE_CASE , "numpy" ):
                UpperCamelCase = input_labels.numpy().tolist()
            if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or not isinstance(input_labels[0] , SCREAMING_SNAKE_CASE ):
                raise ValueError("Input labels must be a list of list integers." )
            UpperCamelCase = [np.array(SCREAMING_SNAKE_CASE ) for label in input_labels]
        else:
            UpperCamelCase = None
        if input_boxes is not None:
            if hasattr(SCREAMING_SNAKE_CASE , "numpy" ):
                UpperCamelCase = input_boxes.numpy().tolist()
            if (
                not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
                or not isinstance(input_boxes[0] , SCREAMING_SNAKE_CASE )
                or not isinstance(input_boxes[0][0] , SCREAMING_SNAKE_CASE )
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points." )
            UpperCamelCase = [np.array(SCREAMING_SNAKE_CASE ).astype(np.floataa ) for box in input_boxes]
        else:
            UpperCamelCase = None
        return input_points, input_labels, input_boxes

    @property
    def __lowerCAmelCase ( self ) -> int:
        """Model input names, deduplicated while preserving order."""
        UpperCamelCase = self.image_processor.model_input_names
        return list(dict.fromkeys(SCREAMING_SNAKE_CASE ) )

    def __lowerCAmelCase ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """Delegate mask post-processing to the wrapped image processor."""
        return self.image_processor.post_process_masks(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 606
| 0
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class lowerCamelCase_ ( unittest.TestCase ):
    """Pipeline tests for ``TextGenerationPipeline`` (PyTorch and TF backends).

    NOTE(review): obfuscation collapsed distinct locals into ``UpperCamelCase``
    variants and most flag arguments into ``__lowercase``; expected-output
    string literals are preserved verbatim.
    """

    a__ : str = MODEL_FOR_CAUSAL_LM_MAPPING
    a__ : Optional[int] = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def UpperCamelCase__ ( self) -> Optional[Any]:
        """Deterministic generation with the tiny PT CTRL checkpoint."""
        __UpperCamelCase :Optional[int] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''')
        # Using `do_sample=False` to force deterministic output
        __UpperCamelCase :Dict = text_generator('''This is a test''' , do_sample=__lowercase)
        self.assertEqual(
            __lowercase , [
                {
                    '''generated_text''': (
                        '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
                        ''' oscope. FiliFili@@'''
                    )
                }
            ] , )
        __UpperCamelCase :str = text_generator(['''This is a test''', '''This is a second test'''])
        self.assertEqual(
            __lowercase , [
                [
                    {
                        '''generated_text''': (
                            '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
                            ''' oscope. FiliFili@@'''
                        )
                    }
                ],
                [
                    {
                        '''generated_text''': (
                            '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
                            ''' oscope. oscope. FiliFili@@'''
                        )
                    }
                ],
            ] , )
        # Token-id output mode (return_tensors) with multiple sequences.
        __UpperCamelCase :List[Any] = text_generator('''This is a test''' , do_sample=__lowercase , num_return_sequences=2 , return_tensors=__lowercase)
        self.assertEqual(
            __lowercase , [
                {'''generated_token_ids''': ANY(__lowercase)},
                {'''generated_token_ids''': ANY(__lowercase)},
            ] , )
        # Batched token-id generation with an explicit pad token.
        __UpperCamelCase :Optional[int] = text_generator.model.config.eos_token_id
        __UpperCamelCase :Union[str, Any] = '''<pad>'''
        __UpperCamelCase :int = text_generator(
            ['''This is a test''', '''This is a second test'''] , do_sample=__lowercase , num_return_sequences=2 , batch_size=2 , return_tensors=__lowercase , )
        self.assertEqual(
            __lowercase , [
                [
                    {'''generated_token_ids''': ANY(__lowercase)},
                    {'''generated_token_ids''': ANY(__lowercase)},
                ],
                [
                    {'''generated_token_ids''': ANY(__lowercase)},
                    {'''generated_token_ids''': ANY(__lowercase)},
                ],
            ] , )

    @require_tf
    def UpperCamelCase__ ( self) -> Optional[int]:
        """Deterministic generation with the tiny TF CTRL checkpoint."""
        __UpperCamelCase :List[str] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''')
        # Using `do_sample=False` to force deterministic output
        __UpperCamelCase :Optional[Any] = text_generator('''This is a test''' , do_sample=__lowercase)
        self.assertEqual(
            __lowercase , [
                {
                    '''generated_text''': (
                        '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
                        ''' please,'''
                    )
                }
            ] , )
        __UpperCamelCase :Any = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__lowercase)
        self.assertEqual(
            __lowercase , [
                [
                    {
                        '''generated_text''': (
                            '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
                            ''' please,'''
                        )
                    }
                ],
                [
                    {
                        '''generated_text''': (
                            '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
                            ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
                        )
                    }
                ],
            ] , )

    def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> Optional[Any]:
        """Build a pipeline instance plus example inputs for the common tests."""
        __UpperCamelCase :List[str] = TextGenerationPipeline(model=__lowercase , tokenizer=__lowercase)
        return text_generator, ["This is a test", "Another test"]

    def UpperCamelCase__ ( self) -> Any:
        """`stop_sequence` should truncate generation at the given string."""
        __UpperCamelCase :Tuple = '''Hello I believe in'''
        __UpperCamelCase :List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''')
        __UpperCamelCase :Any = text_generator(__lowercase)
        self.assertEqual(
            __lowercase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
        __UpperCamelCase :Optional[int] = text_generator(__lowercase , stop_sequence=''' fe''')
        self.assertEqual(__lowercase , [{'''generated_text''': '''Hello I believe in fe'''}])

    def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Dict:
        """Shared behavioural checks run against every (model, tokenizer) pair."""
        __UpperCamelCase :List[Any] = text_generator.model
        __UpperCamelCase :int = text_generator.tokenizer
        __UpperCamelCase :Optional[Any] = text_generator('''This is a test''')
        self.assertEqual(__lowercase , [{'''generated_text''': ANY(__lowercase)}])
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test'''))
        # return_full_text toggles whether the prompt is echoed in the output.
        __UpperCamelCase :Dict = text_generator('''This is a test''' , return_full_text=__lowercase)
        self.assertEqual(__lowercase , [{'''generated_text''': ANY(__lowercase)}])
        self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''])
        __UpperCamelCase :List[Any] = pipeline(task='''text-generation''' , model=__lowercase , tokenizer=__lowercase , return_full_text=__lowercase)
        __UpperCamelCase :str = text_generator('''This is a test''')
        self.assertEqual(__lowercase , [{'''generated_text''': ANY(__lowercase)}])
        self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''])
        __UpperCamelCase :Optional[int] = text_generator('''This is a test''' , return_full_text=__lowercase)
        self.assertEqual(__lowercase , [{'''generated_text''': ANY(__lowercase)}])
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test'''))
        __UpperCamelCase :List[str] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__lowercase)
        self.assertEqual(
            __lowercase , [
                [{'''generated_text''': ANY(__lowercase)}, {'''generated_text''': ANY(__lowercase)}],
                [{'''generated_text''': ANY(__lowercase)}, {'''generated_text''': ANY(__lowercase)}],
            ] , )
        if text_generator.tokenizer.pad_token is not None:
            __UpperCamelCase :Tuple = text_generator(
                ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__lowercase)
            self.assertEqual(
                __lowercase , [
                    [{'''generated_text''': ANY(__lowercase)}, {'''generated_text''': ANY(__lowercase)}],
                    [{'''generated_text''': ANY(__lowercase)}, {'''generated_text''': ANY(__lowercase)}],
                ] , )
        # Mutually-exclusive output-format flags must raise.
        with self.assertRaises(__lowercase):
            __UpperCamelCase :List[str] = text_generator('''test''' , return_full_text=__lowercase , return_text=__lowercase)
        with self.assertRaises(__lowercase):
            __UpperCamelCase :Optional[int] = text_generator('''test''' , return_full_text=__lowercase , return_tensors=__lowercase)
        with self.assertRaises(__lowercase):
            __UpperCamelCase :int = text_generator('''test''' , return_text=__lowercase , return_tensors=__lowercase)
        # Empty prompt is slighly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            __UpperCamelCase :Any = text_generator('''''')
            self.assertEqual(__lowercase , [{'''generated_text''': ANY(__lowercase)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                __UpperCamelCase :int = text_generator('''''')
        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return
        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        # NOTE(review): obfuscation renamed the list below; later code reads it
        # as ``EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS``.
        __UpperCamelCase :int = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
        if (
            tokenizer.model_max_length < 10_000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator('''This is a test''' * 500 , max_new_tokens=20)
            __UpperCamelCase :Tuple = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(__lowercase):
                text_generator(
                    '''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def UpperCamelCase__ ( self) -> Union[str, Any]:
        """Device placement and dtype handling via model_kwargs / pipeline args."""
        import torch

        # Classic `model_kwargs`
        __UpperCamelCase :Optional[Any] = pipeline(
            model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
        self.assertEqual(pipe.model.device , torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa)
        __UpperCamelCase :int = pipe('''This is a test''')
        self.assertEqual(
            __lowercase , [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )
        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        __UpperCamelCase :Optional[Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa)
        self.assertEqual(pipe.model.device , torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa)
        __UpperCamelCase :Optional[int] = pipe('''This is a test''')
        self.assertEqual(
            __lowercase , [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        __UpperCamelCase :Tuple = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''')
        self.assertEqual(pipe.model.device , torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa)
        __UpperCamelCase :List[Any] = pipe('''This is a test''')
        self.assertEqual(
            __lowercase , [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )

    @require_torch
    @require_torch_gpu
    def UpperCamelCase__ ( self) -> Optional[int]:
        """Smoke test: fp16 pipeline on an explicit GPU device."""
        import torch

        __UpperCamelCase :List[Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa)
        pipe('''This is a test''')

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def UpperCamelCase__ ( self) -> Optional[Any]:
        """Smoke test: accelerate device_map with fp16 and sampling args."""
        import torch

        __UpperCamelCase :Optional[Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa)
        pipe('''This is a test''' , do_sample=__lowercase , top_p=0.5)

    def UpperCamelCase__ ( self) -> Union[str, Any]:
        """Warn only when both max_length and max_new_tokens are supplied."""
        __UpperCamelCase :Dict = '''Hello world'''
        __UpperCamelCase :Any = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''')
        if text_generator.model.framework == "tf":
            __UpperCamelCase :Optional[int] = logging.get_logger('''transformers.generation.tf_utils''')
        else:
            __UpperCamelCase :List[Any] = logging.get_logger('''transformers.generation.utils''')
        __UpperCamelCase :Tuple = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(__lowercase) as cl:
            __UpperCamelCase :int = text_generator(__lowercase , max_length=10 , max_new_tokens=1)
        self.assertIn(__lowercase , cl.out)
        # The user only sets one -> no warning
        with CaptureLogger(__lowercase) as cl:
            __UpperCamelCase :Any = text_generator(__lowercase , max_new_tokens=1)
        self.assertNotIn(__lowercase , cl.out)
        with CaptureLogger(__lowercase) as cl:
            __UpperCamelCase :Union[str, Any] = text_generator(__lowercase , max_length=10)
        self.assertNotIn(__lowercase , cl.out)
| 452
|
import qiskit
def lowerCamelCase ( bita , bitb ):
    """Simulate a quantum half adder for two classical input bits.

    Builds a 4-qubit / 2-classical-bit circuit: the inputs are encoded on
    qubits 0 and 1, their XOR (the sum) is written to qubit 2 via two CNOTs,
    and their AND (the carry) to qubit 3 via a Toffoli gate. The circuit is
    then run for 1000 shots on the Aer QASM simulator.

    Args:
        bita: first input bit (0 or 1).
        bitb: second input bit (0 or 1).

    Returns:
        The measurement-counts histogram from the simulation.

    NOTE(review): the original signature declared the same name for both
    parameters (a SyntaxError) and the body read names that were never bound;
    rewritten with consistent names while keeping the circuit unchanged.
    """
    simulator = qiskit.Aer.get_backend('aer_simulator' )
    qc_ha = qiskit.QuantumCircuit(4 , 2 )
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0 )
    if bitb == 1:
        qc_ha.x(1 )
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0 , 2 )
    qc_ha.cx(1 , 2 )
    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0 , 1 , 3 )
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2 , 0 )  # extract XOR value
    qc_ha.measure(3 , 1 )  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha , simulator , shots=1_000 )
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha )
if __name__ == "__main__":
    # NOTE(review): the original bound the result to ``__lowercase`` but
    # printed ``counts``, and called ``half_adder``, which is not defined in
    # this file; use one consistent name and call the function defined above.
    counts = lowerCamelCase(1, 1)
    print(F'Half Adder Output Qubit Counts: {counts}')
| 452
| 1
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A__ :
    """Builds tiny ViT configs/inputs and runs shape checks for the TF tests.

    NOTE(review): the previous revision gave every ``__init__`` parameter the
    same name (a SyntaxError) and every method the name ``__lowercase``;
    names are restored to the ones the sibling test class actually calls
    (``prepare_config_and_inputs`` etc.) — confirm against upstream.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one tiny forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small ViTConfig from the tester hyper-parameters."""
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check output shapes of the bare TFViTModel, incl. pos-enc interpolation."""
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check logits shapes of the classification head, incl. greyscale input."""
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict


# Backward-compatible public alias: the test class below instantiates the
# tester through this name.
TFViTModelTester = A__
@require_tf
class A__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common + pipeline test suite for the TF ViT models.

    NOTE(review): base classes and class attributes were mangled in the
    previous revision; restored to the names the mixins read
    (``all_model_classes`` etc.) — confirm against upstream.
    """

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        """Input embeddings must be a Keras layer; output embeddings a layer or None."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        """The first positional argument of `call` must be `pixel_values`."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained('google/vit-base-patch16-224')
        self.assertIsNotNone(model)
def A_ ( ) -> Any:
    """Load the COCO sample image used by the slow integration test below."""
    fixture_path = './tests/fixtures/tests_samples/COCO/000000039769.png'
    return Image.open(fixture_path)
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
    """Slow integration test: run a real google/vit-base checkpoint end to end.

    NOTE(review): the previous revision named both members ``__lowercase``,
    so the ``self.default_image_processor`` access below could never resolve;
    the property and test names are restored.
    """

    @cached_property
    def default_image_processor(self):
        """Image processor matching the checkpoint, or None without vision deps."""
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.27_44, 0.82_15, -0.08_36])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 302
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
# NOTE(review): the previous revision bound all four module constants to the
# single name `lowercase`, each overwriting the last; the tokenizer class
# below reads them under the names restored here.
logger = logging.get_logger(__name__)

# On-disk file names that make up a (fast) RoBERTa tokenizer.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# Download locations of the tokenizer files for the published checkpoints.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input lengths (in tokens) of the published checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class A__ ( PreTrainedTokenizerFast ):
    """Fast (Rust-backed) RoBERTa tokenizer — GPT-2-style byte-level BPE.

    NOTE(review): the previous revision was not importable: the ``__init__``
    parameters shared one name (SyntaxError) and ``@mask_token.setter``
    appeared without a ``mask_token`` property (NameError at class creation).
    Attribute and override names are restored to the ones the
    ``PreTrainedTokenizerFast`` machinery dispatches on — confirm upstream.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Re-configure the byte-level pre-tokenizer if the serialized state
        # disagrees with the requested `add_prefix_space` behaviour.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # Same dance for the post-processor (controls offsets/special tokens).
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])

            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token; logs an error and returns None if never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word preceded by a space, so
        # lstrip it by default when given as a plain string.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix = None) -> Tuple[str]:
        """Save the underlying BPE model files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_1=None):
        """<s> A </s> for one sequence; <s> A </s></s> B </s> for a pair."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_1 = None) -> List[int]:
        """RoBERTa does not use token type ids: always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_1 + sep) * [0]
| 302
| 1
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
# Mapping from key fragments in the original SAM checkpoint state dict to the
# fragments used by the Hugging Face SamModel implementation. Replacements
# are applied as substring substitutions, in insertion order, by the
# key-renaming helper below.
a = {
    "iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
    "iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
    "iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
    "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
    "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
    "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
    "mask_downscaling.0": "mask_embed.conv1",
    "mask_downscaling.1": "mask_embed.layer_norm1",
    "mask_downscaling.3": "mask_embed.conv2",
    "mask_downscaling.4": "mask_embed.layer_norm2",
    "mask_downscaling.6": "mask_embed.conv3",
    "point_embeddings": "point_embed",
    "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
    "image_encoder": "vision_encoder",
    "neck.0": "neck.conv1",
    "neck.1": "neck.layer_norm1",
    "neck.2": "neck.conv2",
    "neck.3": "neck.layer_norm2",
    "patch_embed.proj": "patch_embed.projection",
    ".norm": ".layer_norm",
    "blocks": "layers",
}
def _SCREAMING_SNAKE_CASE ( snake_case , key_mapping=None ):
    """Rename the keys of an original SAM state dict to the HF naming scheme.

    Fixes vs. the previous revision: the body referenced undefined names
    (``state_dict``, ``KEYS_TO_MODIFY_MAPPING``, ``model_state_dict``, ...)
    and never accumulated the renamed entries.

    Args:
        snake_case: the original checkpoint state dict. Mutated in place
            (``pixel_mean``/``pixel_std`` are popped — the image processor
            owns the normalization statistics instead).
        key_mapping: optional fragment-replacement mapping; defaults to the
            module-level map ``a`` above.

    Returns:
        A new dict with renamed keys, plus a copied
        ``shared_image_embedding.positional_embedding`` entry.
    """
    if key_mapping is None:
        key_mapping = a  # module-level fragment map defined above
    model_state_dict = {}
    snake_case.pop("pixel_mean", None)
    snake_case.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in snake_case.items():
        # Apply the substring replacements in mapping order.
        for key_to_modify, new_key in key_mapping.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        # The hypernetwork MLP layers use positional rather than named keys.
        match = re.match(output_hypernetworks_mlps_pattern, key)
        if match:
            layer_nb = int(match.group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict


# Public alias: the conversion entry point below calls `replace_keys`, and the
# underscore name is shadowed by the next definition.
replace_keys = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE ( model_name , pytorch_dump_folder , push_to_hub , model_hub_id="ybelkada/segment-anything" ):
    """Download an original SAM checkpoint, load it into a HF SamModel and
    sanity-check its IoU predictions on a reference image.

    Fixes vs. the previous revision: all four parameters shared one name
    (a SyntaxError) and the body used undefined locals throughout.

    NOTE(review): `pytorch_dump_folder` and `push_to_hub` are accepted for
    CLI compatibility but the visible body never saves or pushes — confirm
    whether saving/pushing was dropped intentionally. Requires a CUDA device.
    """
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    # Pick the vision-tower size matching the checkpoint variant.
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="""cpu""")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("""cuda""")

    img_url = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("""RGB""")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    # No prompt: just the image.
    inputs = processor(images=np.array(raw_image), return_tensors="""pt""").to("""cuda""")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    # Reference scores are only known for the ViT-H checkpoint.
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        # One point prompt.
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="""pt"""
        ).to("""cuda""")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        # Box prompt.
        input_boxes = ((75, 275, 1_725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="""pt""").to("""cuda""")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="""pt"""
        ).to("""cuda""")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    # NOTE: the previous revision rebound the module-level name `a` (the key
    # mapping) to the parser, the choices list and the parsed args in turn,
    # and then called an undefined `convert_sam_checkpoint`.
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    # NOTE(review): restricting --model_hub_id to `choices` while defaulting to
    # a repo id looks unintentional upstream; kept as-is for compatibility.
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    args = parser.parse_args()
    _SCREAMING_SNAKE_CASE(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 175
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
# Module logger: the conversation container and pipeline below emit their
# overwrite/trim warnings through the name `logger`, which was previously
# bound to `a` and therefore undefined at use sites.
logger = logging.get_logger(__name__)
a = logger  # legacy alias kept for backward compatibility
class _A :
    """Container holding one conversation: its past turns, generated
    responses and the not-yet-processed user input.

    Fixes vs. the previous revision: the ``__init__`` parameters shared one
    name (SyntaxError), ``uuid.uuida()`` was called (AttributeError), the
    mutator methods all collided on one name, and ``__repr__`` accumulated
    into an undefined variable.
    """

    def __init__(self, text=None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        """Create a conversation, minting a fresh UUID when none is given."""
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        """Equal when UUIDs match, or when all conversational content matches."""
        if not isinstance(other, _A):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text, overwrite=False):
        """Stage the next user utterance; warn if one is already pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
                    F"with: \"{text}\"." )
                self.new_user_input = text
            else:
                logger.warning(
                    F"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
                    F"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input" )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the pending user input into the processed history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None

    def append_response(self, response):
        """Record a model-generated response."""
        self.generated_responses.append(response )

    def iter_texts(self):
        """Yield (is_user, text) pairs in chronological order."""
        for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = F"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = """user""" if is_user else """bot"""
            output += F"{name} >> {text} \n"
        return output


# Public alias (the mangled class name starts with an underscore).
Conversation = _A
@add_end_docstrings(
    PIPELINE_INIT_ARGS , R"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """ , )
class _A ( Pipeline ):
    """Multi-turn conversational pipeline.

    Fixes vs. the previous revision: the decorator argument and base class
    were undefined names, several signatures repeated one parameter name
    (SyntaxError), and the hook methods all collided on one name — they are
    restored to the names the `Pipeline` base class dispatches on.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs )
        # Generation needs a pad token; fall back to EOS like GPT-2 does.
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        """Split user kwargs into preprocess / forward / postprocess params."""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["""max_length"""]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs )
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        """Run the pipeline; unwrap single-conversation results."""
        outputs = super().__call__(conversations , num_workers=num_workers , **kwargs )
        if isinstance(conversations , list ) and len(outputs ) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        """Tokenize the conversation into model inputs."""
        # NOTE(review): the Conversation container and this pipeline were given
        # the same mangled class name upstream, so an isinstance check cannot
        # name the container class here; duck-type on its attributes instead.
        if not hasattr(conversation , "new_user_input" ):
            raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
        if conversation.new_user_input is None:
            raise ValueError(
                F"Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. "
                """Add user inputs with the conversation's `add_user_input` method""" )
        if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation )
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        """Generate a reply, trimming the history to leave room for it."""
        max_length = generate_kwargs.get("""max_length""" , self.model.config.max_length )
        n = model_inputs["""input_ids"""].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})" )
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["""input_ids"""][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["""attention_mask"""][:, -trim:]
        conversation = model_inputs.pop("""conversation""" )
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        # Encoder-decoder outputs start with the decoder start token; decoder-only
        # outputs echo the whole prompt, so skip the first n tokens instead.
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        """Decode the reply and fold it back into the conversation object."""
        output_ids = model_outputs["""output_ids"""]
        answer = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
        conversation = model_outputs["""conversation"""]
        conversation.mark_processed()
        conversation.append_response(answer )
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        """Fallback tokenization: join turns with EOS tokens, truncate left."""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(text , add_special_tokens=False ) )
        if len(input_ids ) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 175
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; the GIT config classes below log init-time notices through
# the name `logger`, which was previously bound to a mangled name.
logger = logging.get_logger(__name__)

# Canonical archive map of pretrained GIT configuration files.
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class __magic_name__ ( PretrainedConfig ):
    """Configuration for the vision encoder of a GIT model.

    Fixes vs. the previous revision: the base class was an undefined name,
    every ``__init__`` parameter shared one name (SyntaxError), and
    ``from_pretrained`` dropped the ``(config_dict, kwargs)`` unpacking.
    """

    model_type = '''git_vision_model'''

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load the vision sub-config, unwrapping it from a full GIT config."""
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type" ) == "git":
            config_dict = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(config_dict , **kwargs )


# Backward-compatible public alias: the sibling GIT config below constructs
# the vision config through this name (the next class shadows the mangled one).
GitVisionConfig = __magic_name__
class __magic_name__ ( PretrainedConfig ):
    """Configuration of a GIT (GenerativeImage2Text) model: a text decoder
    plus the nested vision-encoder config.

    Fixes vs. the previous revision: the base class was an undefined name and
    every ``__init__`` parameter shared one name (SyntaxError).
    """

    model_type = '''git'''

    def __init__(
        self,
        vision_config=None,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ) -> None:
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values." )

        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 323
|
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( lowerCAmelCase ,unittest.TestCase ):
    """Tokenizer test-suite for CodeGen (slow and fast BPE tokenizers).

    NOTE(review): in this revision every class attribute is bound to one
    duplicated identifier (so only the last binding survives) and every test
    method shares the name ``lowerCAmelCase`` (so only the last survives at
    runtime).  Many method locals are assigned to throwaway names while later
    lines read the pre-rename identifiers (e.g. ``tokenizer``,
    ``rust_tokenizer``, ``kwargs``, ``tokens``) — those reads are unbound
    here; confirm against the upstream test module.
    """

    # Shadowed attribute chain: only the final binding is visible at runtime.
    UpperCAmelCase =CodeGenTokenizer
    UpperCAmelCase =CodeGenTokenizerFast
    UpperCAmelCase =True
    UpperCAmelCase ={"add_prefix_space": True}
    UpperCAmelCase =False

    def lowerCAmelCase ( self) -> Optional[int]:
        """Write a toy BPE vocab/merges fixture pair into the test tmpdir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        _UpperCAmelCase : Optional[Any] =[
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
            '<|endoftext|>',
        ]
        _UpperCAmelCase : Dict =dict(zip(snake_case , range(len(snake_case))))
        _UpperCAmelCase : List[Any] =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        _UpperCAmelCase : List[Any] ={'unk_token': '<unk>'}
        _UpperCAmelCase : List[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        _UpperCAmelCase : List[str] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
            fp.write(json.dumps(snake_case) + '\n')
        with open(self.merges_file , 'w' , encoding='utf-8') as fp:
            fp.write('\n'.join(snake_case))

    def lowerCAmelCase ( self , **snake_case) -> str:
        """Instantiate a slow CodeGen tokenizer from the tmpdir fixtures."""
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **snake_case)

    def lowerCAmelCase ( self , **snake_case) -> Optional[Any]:
        """Instantiate a fast (Rust) CodeGen tokenizer from the tmpdir fixtures."""
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **snake_case)

    def lowerCAmelCase ( self , snake_case) -> Optional[Any]:
        """Return the (input_text, output_text) pair used by common round-trip tests."""
        _UpperCAmelCase : Union[str, Any] ='lower newer'
        _UpperCAmelCase : int ='lower newer'
        return input_text, output_text

    def lowerCAmelCase ( self) -> int:
        """Tokenize a sample with the slow tokenizer and check tokens and ids."""
        _UpperCAmelCase : Optional[int] =CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
        _UpperCAmelCase : str ='lower newer'
        _UpperCAmelCase : Union[str, Any] =['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
        _UpperCAmelCase : Any =tokenizer.tokenize(snake_case , add_prefix_space=snake_case)
        self.assertListEqual(snake_case , snake_case)
        _UpperCAmelCase : Any =tokens + [tokenizer.unk_token]
        _UpperCAmelCase : List[str] =[1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case) , snake_case)

    def lowerCAmelCase ( self) -> Union[str, Any]:
        """Slow and fast tokenizers must agree on tokens, ids and unk handling."""
        if not self.test_rust_tokenizer:
            return
        _UpperCAmelCase : Tuple =self.get_tokenizer()
        _UpperCAmelCase : Dict =self.get_rust_tokenizer(add_prefix_space=snake_case)
        _UpperCAmelCase : Union[str, Any] ='lower newer'
        # Testing tokenization
        _UpperCAmelCase : List[str] =tokenizer.tokenize(snake_case , add_prefix_space=snake_case)
        _UpperCAmelCase : Optional[int] =rust_tokenizer.tokenize(snake_case)
        self.assertListEqual(snake_case , snake_case)
        # Testing conversion to ids without special tokens
        _UpperCAmelCase : str =tokenizer.encode(snake_case , add_special_tokens=snake_case , add_prefix_space=snake_case)
        _UpperCAmelCase : Optional[int] =rust_tokenizer.encode(snake_case , add_special_tokens=snake_case)
        self.assertListEqual(snake_case , snake_case)
        # Testing conversion to ids with special tokens
        _UpperCAmelCase : Dict =self.get_rust_tokenizer(add_prefix_space=snake_case)
        _UpperCAmelCase : Tuple =tokenizer.encode(snake_case , add_prefix_space=snake_case)
        _UpperCAmelCase : int =rust_tokenizer.encode(snake_case)
        self.assertListEqual(snake_case , snake_case)
        # Testing the unknown token
        _UpperCAmelCase : List[str] =tokens + [rust_tokenizer.unk_token]
        _UpperCAmelCase : Union[str, Any] =[1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(snake_case) , snake_case)

    def lowerCAmelCase ( self , *snake_case , **snake_case) -> Any:
        """Intentionally skipped; see the inline explanation."""
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def lowerCAmelCase ( self , snake_case=1_5) -> Optional[int]:
        """Padding must raise when the tokenizer defines no pad token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                _UpperCAmelCase : Optional[Any] =self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case)
                # Simple input
                _UpperCAmelCase : List[str] ='This is a simple input'
                _UpperCAmelCase : Optional[Any] =['This is a simple input 1', 'This is a simple input 2']
                _UpperCAmelCase : int =('This is a simple input', 'This is a pair')
                _UpperCAmelCase : Union[str, Any] =[
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length')
                # Simple input
                self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length')
                # Simple input
                self.assertRaises(
                    snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
                # Pair input
                self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length')
                # Pair input
                self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length')
                # Pair input
                self.assertRaises(
                    snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )

    def lowerCAmelCase ( self) -> Optional[Any]:
        """Max-length and automatic padding for single and pair batches."""
        _UpperCAmelCase : Any =CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>')
        # Simple input
        _UpperCAmelCase : List[Any] ='This is a simple input'
        _UpperCAmelCase : Any =['This is a simple input looooooooong', 'This is a simple input']
        _UpperCAmelCase : List[str] =('This is a simple input', 'This is a pair')
        _UpperCAmelCase : Optional[int] =[
            ('This is a simple input loooooong', 'This is a simple input'),
            ('This is a simple pair loooooong', 'This is a simple pair'),
        ]
        _UpperCAmelCase : List[Any] =tokenizer.pad_token_id
        _UpperCAmelCase : Union[str, Any] =tokenizer(snake_case , padding='max_length' , max_length=3_0 , return_tensors='np')
        _UpperCAmelCase : List[Any] =tokenizer(snake_case , padding=snake_case , truncate=snake_case , return_tensors='np')
        _UpperCAmelCase : Optional[Any] =tokenizer(*snake_case , padding='max_length' , max_length=6_0 , return_tensors='np')
        _UpperCAmelCase : List[Any] =tokenizer(snake_case , padding=snake_case , truncate=snake_case , return_tensors='np')
        # s
        # test single string max_length padding
        self.assertEqual(out_s['input_ids'].shape[-1] , 3_0)
        self.assertTrue(pad_token_id in out_s['input_ids'])
        self.assertTrue(0 in out_s['attention_mask'])
        # s2
        # test automatic padding
        self.assertEqual(out_sa['input_ids'].shape[-1] , 3_3)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa['input_ids'][0])
        self.assertFalse(0 in out_sa['attention_mask'][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa['input_ids'][1])
        self.assertTrue(0 in out_sa['attention_mask'][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['input_ids'].shape[-1] , 6_0)
        self.assertTrue(pad_token_id in out_p['input_ids'])
        self.assertTrue(0 in out_p['attention_mask'])
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa['input_ids'].shape[-1] , 5_2)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa['input_ids'][0])
        self.assertFalse(0 in out_pa['attention_mask'][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa['input_ids'][1])
        self.assertTrue(0 in out_pa['attention_mask'][1])

    def lowerCAmelCase ( self) -> Optional[int]:
        """A custom BOS token must be prepended to encodings and survive decoding."""
        _UpperCAmelCase : Any ='$$$'
        _UpperCAmelCase : Any =CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=snake_case , add_bos_token=snake_case)
        _UpperCAmelCase : Optional[Any] ='This is a simple input'
        _UpperCAmelCase : Any =['This is a simple input 1', 'This is a simple input 2']
        _UpperCAmelCase : int =tokenizer.bos_token_id
        _UpperCAmelCase : Optional[Any] =tokenizer(snake_case)
        _UpperCAmelCase : Tuple =tokenizer(snake_case)
        self.assertEqual(out_s.input_ids[0] , snake_case)
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
        _UpperCAmelCase : Optional[int] =tokenizer.decode(out_s.input_ids)
        _UpperCAmelCase : Any =tokenizer.batch_decode(out_sa.input_ids)
        self.assertEqual(decode_s.split()[0] , snake_case)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))

    @slow
    def lowerCAmelCase ( self) -> Optional[int]:
        """Decoding with ``truncate_before_pattern`` should stop at the given regexes."""
        _UpperCAmelCase : Optional[Any] =CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono')
        _UpperCAmelCase : Optional[Any] ='\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#'
        _UpperCAmelCase : Tuple ='\nif len_a > len_b: result = a\nelse: result = b'
        _UpperCAmelCase : Optional[int] =tokenizer.encode(snake_case)
        _UpperCAmelCase : List[str] =['^#', re.escape('<|endoftext|>'), '^\'\'\'', '^"""', '\n\n\n']
        _UpperCAmelCase : Dict =tokenizer.decode(snake_case , truncate_before_pattern=snake_case)
        self.assertEqual(snake_case , snake_case)

    def lowerCAmelCase ( self) -> Optional[Any]:
        """Intentionally empty override of a common test."""
        pass
| 446
| 0
|
'''simple docstring'''
import math
from datetime import datetime, timedelta
def __UpperCamelCase ( a : int ) ->datetime:
    """Return the date of Easter Sunday for the Gregorian year ``a``.

    Implements Gauss's Easter algorithm.  The two ``if`` branches encode the
    classical exceptions where the computed date must be pulled back to
    April 19 / April 18.

    BUG FIXES:
    - ``leap_day_reinstall_number`` must use floor division; true division
      introduced fractional offsets that shifted the result for centuries not
      divisible by 4 (e.g. 1994 came out as March 28 instead of April 3).
    - The body previously read a module-level ``year`` instead of the
      parameter; the parameter is now bound locally.
    """
    year = a
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = year // 100
    lunar_orbit_correction = (13 + 8 * leap_day_inhibits) // 25
    # Floor division: the number of century leap days reinstated by the
    # Gregorian reform must be an integer.
    leap_day_reinstall_number = leap_day_inhibits // 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    if days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    return datetime(year, 3, 22) + timedelta(days=days_to_add + days_from_phm_to_sunday)


if __name__ == "__main__":
    for year in (1_994, 2_000, 2_010, 2_021, 2_023):
        # BUG FIX: the f-string below reads ``tense``; the previous revision
        # assigned the value to a different name and called an undefined
        # ``gauss_easter``.
        tense = 'will be' if year > datetime.now().year else 'was'
        print(f'Easter in {year} {tense} {__UpperCamelCase(year)}')
| 707
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
    """Config/input fixture builder for the TF ViT model tests.

    NOTE(review): in this revision the constructor declares every parameter
    under one duplicated name (a syntax error) and method locals are uniformly
    renamed to ``snake_case``; lines that read other names (``model``,
    ``pixel_values``, ``config_and_inputs`` fields, …) are unbound here.
    Confirm against the upstream test module.
    """

    def __init__( self , A__ , A__=13 , A__=30 , A__=2 , A__=3 , A__=True , A__=True , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=10 , A__=0.0_2 , A__=3 , A__=None , ) -> List[Any]:
        """Record all hyper-parameters used to build configs and dummy inputs."""
        snake_case = parent
        snake_case = batch_size
        snake_case = image_size
        snake_case = patch_size
        snake_case = num_channels
        snake_case = is_training
        snake_case = use_labels
        snake_case = hidden_size
        snake_case = num_hidden_layers
        snake_case = num_attention_heads
        snake_case = intermediate_size
        snake_case = hidden_act
        snake_case = hidden_dropout_prob
        snake_case = attention_probs_dropout_prob
        snake_case = type_sequence_label_size
        snake_case = initializer_range
        snake_case = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        snake_case = (image_size // patch_size) ** 2
        snake_case = num_patches + 1

    def UpperCamelCase ( self ) -> int:
        """Build a (config, pixel_values, labels) triple for one test run."""
        snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case = None
        if self.use_labels:
            snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case = self.get_config()
        return config, pixel_values, labels

    def UpperCamelCase ( self ) -> int:
        """Return a ViTConfig built from the stored hyper-parameters."""
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , )

    def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]:
        """Run TFViTModel and check hidden-state shapes, incl. interpolated positions."""
        snake_case = TFViTModel(config=A__ )
        snake_case = model(A__ , training=A__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image with different size than the one specified in config.
        snake_case = self.image_size // 2
        snake_case = pixel_values[:, :, :image_size, :image_size]
        snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ )
        snake_case = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )

    def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[int]:
        """Run TFViTForImageClassification and check logits shapes (incl. greyscale)."""
        snake_case = self.type_sequence_label_size
        snake_case = TFViTForImageClassification(A__ )
        snake_case = model(A__ , labels=A__ , training=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image with different size than the one specified in config.
        snake_case = self.image_size // 2
        snake_case = pixel_values[:, :, :image_size, :image_size]
        snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        snake_case = 1
        snake_case = TFViTForImageClassification(A__ )
        snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        snake_case = model(A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def UpperCamelCase ( self ) -> Union[str, Any]:
        """Split prepared inputs into (config, inputs_dict) for the common tests."""
        snake_case = self.prepare_config_and_inputs()
        snake_case , snake_case , snake_case = config_and_inputs
        snake_case = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class _lowercase ( __a , __a , unittest.TestCase ):
    """Common TF model test-suite wired up for ViT.

    NOTE(review): class attributes share one duplicated name (only the last
    binding survives) and several locals read pre-rename identifiers
    (``model``, ``x``); confirm against the upstream test module.
    """

    _UpperCAmelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    _UpperCAmelCase = (
        {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False

    def UpperCamelCase ( self ) -> List[Any]:
        """Set up the model tester and the config tester."""
        snake_case = TFViTModelTester(self )
        snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )

    def UpperCamelCase ( self ) -> int:
        """Run the shared configuration checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def UpperCamelCase ( self ) -> int:
        """Skipped: ViT consumes pixel values, not input embeddings."""
        pass

    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def UpperCamelCase ( self ) -> str:
        """Skipped: ViT consumes pixel values, not input embeddings."""
        pass

    def UpperCamelCase ( self ) -> Union[str, Any]:
        """Input embeddings are a Keras layer; output embeddings may be absent."""
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case = model_class(A__ )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            snake_case = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(A__ , tf.keras.layers.Layer ) )

    def UpperCamelCase ( self ) -> List[Any]:
        """The call signature must start with ``pixel_values``."""
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case = model_class(A__ )
            snake_case = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case = [*signature.parameters.keys()]
            snake_case = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , A__ )

    def UpperCamelCase ( self ) -> Union[str, Any]:
        """Exercise the base model shape checks."""
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__ )

    def UpperCamelCase ( self ) -> Optional[Any]:
        """Exercise the image-classification head shape checks."""
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A__ )

    @slow
    def UpperCamelCase ( self ) -> Any:
        """The pretrained checkpoint must load successfully."""
        snake_case = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
        self.assertIsNotNone(A__ )
def __UpperCamelCase ( ) ->Any:
    """Load the COCO sample image used by the integration tests below."""
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_tf
@require_vision
class _lowercase ( unittest.TestCase ):
    """Integration test: run the pretrained ViT checkpoint on a COCO image.

    NOTE(review): locals are renamed to ``snake_case`` while later lines read
    the pre-rename names (``image_processor``, ``model``, ``outputs``), and
    ``self.default_image_processor`` refers to a property name this revision
    does not define; confirm against the upstream test module.
    """

    @cached_property
    def UpperCamelCase ( self ) -> Optional[int]:
        """Image processor for the checkpoint, or None without vision deps."""
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None

    @slow
    def UpperCamelCase ( self ) -> Dict:
        """Verify logits shape and a reference slice for the 224px checkpoint."""
        snake_case = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
        snake_case = self.default_image_processor
        snake_case = prepare_img()
        snake_case = image_processor(images=A__ , return_tensors='''tf''' )
        # forward pass
        snake_case = model(**A__ )
        # verify the logits
        snake_case = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , A__ )
        snake_case = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
        tf.debugging.assert_near(outputs.logits[0, :3] , A__ , atol=1e-4 )
| 44
| 0
|
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
# NOTE(review): both example grids below are bound to the same name ``_a``,
# so the glider is shadowed by the blinker at import time — confirm the
# intended distinct names (e.g. GLIDER / BLINKER) upstream.
_a : Union[str, Any] = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
_a : int = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def _a (lowercase__ : list[list[int]] ) -> list[list[int]]:
    """Return the next Conway's Game of Life generation for the given grid.

    A live cell (1) survives with two or three live neighbours; a dead cell
    (0) becomes alive with exactly three live neighbours; everything else is
    dead in the next generation.  The input grid is not modified.
    """
    # The eight neighbour offsets around a cell.
    offsets = ((-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1))
    row_count = len(lowercase__)
    next_generation = []
    for row_idx, row in enumerate(lowercase__):
        new_row = []
        for col_idx in range(len(row)):
            # Count live neighbours, clipping at the grid edges.
            live_neighbours = 0
            for d_row, d_col in offsets:
                nb_row = row_idx + d_row
                nb_col = col_idx + d_col
                if 0 <= nb_row < row_count and 0 <= nb_col < len(row):
                    live_neighbours += lowercase__[nb_row][nb_col]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            # Similarly, all other dead cells stay dead.
            alive = row[col_idx] == 1
            survives = alive and 2 <= live_neighbours <= 3
            is_born = (not alive) and live_neighbours == 3
            new_row.append(1 if survives or is_born else 0)
        next_generation.append(new_row)
    return next_generation
def _a (lowercase__ : list[list[int]] , lowercase__ : int ) -> list[Image.Image]:
    """Render successive Game of Life generations as greyscale PIL images.

    NOTE(review): this revision redefines ``_a`` (shadowing the generation
    function above), reuses one name for both parameters (a syntax error),
    reads an undefined ``new_generation``, and drops the per-pixel write
    (``colour`` is unbound and ``pkl`` is never assigned back) — confirm the
    intended implementation upstream.
    """
    __snake_case = []
    for _ in range(lowercase__ ):
        # Create output image
        __snake_case = Image.new('RGB' , (len(cells[0] ), len(lowercase__ )) )
        __snake_case = img.load()
        # Save cells to image
        for x in range(len(lowercase__ ) ):
            for y in range(len(cells[0] ) ):
                # 255 for a dead cell, 0 for a live one (white background).
                __snake_case = 2_5_5 - cells[y][x] * 2_5_5
                __snake_case = (colour, colour, colour)
        # Save image
        images.append(lowercase__ )
        __snake_case = new_generation(lowercase__ )
    return images
if __name__ == "__main__":
    # NOTE(review): ``generate_images`` and ``GLIDER`` are not bound under
    # these names in this revision, and the result is assigned to ``_a`` while
    # the next line reads ``images`` — confirm the intended names upstream.
    _a : Union[str, Any] = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
| 56
|
def _lowercase ( number , number_of_terms ) -> str:
    """Return the multiplication table of ``number`` as a newline-joined string.

    Each of the ``number_of_terms`` lines has the form ``"n * i = n*i"`` for
    ``i`` from 1 to ``number_of_terms``; an empty string is returned for zero
    terms.

    BUG FIX: both parameters previously shared one duplicated name (a syntax
    error); the names are restored from the reads in the body.
    """
    return "\n".join(
        F'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) )


if __name__ == "__main__":
    # BUG FIX: the call referenced the function's pre-rename identifier.
    print(_lowercase(5, 10))
| 472
| 0
|
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
# Module-level logger for the HerBERT fast tokenizer.
__lowerCAmelCase : Dict =logging.get_logger(__name__)
# File names used when saving/loading the tokenizer assets.
__lowerCAmelCase : str ={"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
# Hub URLs of the pretrained vocab/merges for allegro/herbert-base-cased.
# NOTE(review): the four constants below all reuse the name ``__lowerCAmelCase``
# (only the last binding survives at runtime); confirm the intended distinct
# names (e.g. PRETRAINED_VOCAB_FILES_MAP) upstream.
__lowerCAmelCase : Tuple ={
    """vocab_file""": {
        """allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"""
    },
    """merges_file""": {
        """allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"""
    },
}
# Maximum model input size for the pretrained checkpoint.
__lowerCAmelCase : int ={"""allegro/herbert-base-cased""": 5_1_4}
# No extra init configuration is required for the pretrained checkpoint.
__lowerCAmelCase : List[Any] ={}
class _A ( lowerCAmelCase ):
    """Fast (Rust-backed) tokenizer for HerBERT.

    NOTE(review): the four helper methods below all share the name ``A__``,
    so only the last binding (vocabulary saving) survives at runtime, and the
    class attributes likewise share ``snake_case__``.  From their bodies the
    intended names appear to be ``build_inputs_with_special_tokens``,
    ``get_special_tokens_mask``, ``create_token_type_ids_from_sequences`` and
    ``save_vocabulary``; several locals (``cls``, ``sep``, ``token_ids_a``)
    are also read without being bound — confirm upstream.
    """

    snake_case__ : Dict = VOCAB_FILES_NAMES
    snake_case__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    snake_case__ : Tuple = PRETRAINED_INIT_CONFIGURATION
    snake_case__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case__ : Union[str, Any] = HerbertTokenizer

    def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="<s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<mask>" , __lowerCAmelCase="</s>" , **__lowerCAmelCase , ):
        """Forward vocab/merges/special-token settings to the fast-tokenizer base."""
        super().__init__(
            __lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , **__lowerCAmelCase , )

    def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
        """Wrap one or two token-id sequences with [CLS]/[SEP] special tokens."""
        lowercase = [self.cls_token_id]
        lowercase = [self.sep_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(__lowerCAmelCase )) + [1]
        return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1]

    def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
        """Return token-type ids: 0 for the first segment, 1 for the second."""
        lowercase = [self.sep_token_id]
        lowercase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
        """Persist the tokenizer model files and return their paths."""
        lowercase = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase )
        return tuple(__lowerCAmelCase )
| 703
|
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( lowerCAmelCase ):
    """Scheduler test-suite for ``IPNDMScheduler``.

    NOTE(review): every test helper below is bound to the same method name
    ``A__`` (so only the last survives at runtime), and locals are uniformly
    renamed to ``lowercase`` while later lines read the pre-rename names
    (``config``, ``scheduler``, ``sample``, ``residual``, ``new_scheduler``,
    ``output``/``new_output``, ``output_a``, ``result_mean`` …) — those reads
    are unbound here; confirm against the upstream test module.
    """

    snake_case__ : Union[str, Any] = (IPNDMScheduler,)
    snake_case__ : List[str] = (('num_inference_steps', 50),)

    def A__ ( self , **__lowerCAmelCase ):
        """Return the default scheduler config merged with keyword overrides."""
        lowercase = {"""num_train_timesteps""": 1000}
        config.update(**__lowerCAmelCase )
        return config

    def A__ ( self , __lowerCAmelCase=0 , **__lowerCAmelCase ):
        """Save/load round-trip: outputs must match before and after reload."""
        lowercase = dict(self.forward_default_kwargs )
        lowercase = kwargs.pop("""num_inference_steps""" , __lowerCAmelCase )
        lowercase = self.dummy_sample
        lowercase = 0.1 * sample
        lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config(**__lowerCAmelCase )
            lowercase = scheduler_class(**__lowerCAmelCase )
            scheduler.set_timesteps(__lowerCAmelCase )
            # copy over dummy past residuals
            lowercase = dummy_past_residuals[:]
            if time_step is None:
                lowercase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__lowerCAmelCase )
                lowercase = scheduler_class.from_pretrained(__lowerCAmelCase )
                new_scheduler.set_timesteps(__lowerCAmelCase )
                # copy over dummy past residuals
                lowercase = dummy_past_residuals[:]
            lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            lowercase = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            lowercase = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def A__ ( self ):
        """Intentionally empty override of a common scheduler test."""
        pass

    def A__ ( self , __lowerCAmelCase=0 , **__lowerCAmelCase ):
        """Save/load variant that restores residuals after ``set_timesteps``."""
        lowercase = dict(self.forward_default_kwargs )
        lowercase = kwargs.pop("""num_inference_steps""" , __lowerCAmelCase )
        lowercase = self.dummy_sample
        lowercase = 0.1 * sample
        lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config()
            lowercase = scheduler_class(**__lowerCAmelCase )
            scheduler.set_timesteps(__lowerCAmelCase )
            # copy over dummy past residuals (must be after setting timesteps)
            lowercase = dummy_past_residuals[:]
            if time_step is None:
                lowercase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__lowerCAmelCase )
                lowercase = scheduler_class.from_pretrained(__lowerCAmelCase )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(__lowerCAmelCase )
                # copy over dummy past residual (must be after setting timesteps)
                lowercase = dummy_past_residuals[:]
            lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            lowercase = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
            lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            lowercase = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def A__ ( self , **__lowerCAmelCase ):
        """Run a full denoising loop (twice, as IPNDM needs warm residuals)."""
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(**__lowerCAmelCase )
        lowercase = scheduler_class(**__lowerCAmelCase )
        lowercase = 10
        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter
        scheduler.set_timesteps(__lowerCAmelCase )
        for i, t in enumerate(scheduler.timesteps ):
            lowercase = model(__lowerCAmelCase , __lowerCAmelCase )
            lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            lowercase = model(__lowerCAmelCase , __lowerCAmelCase )
            lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
        return sample

    def A__ ( self ):
        """``step()`` outputs must keep the sample shape at adjacent timesteps."""
        lowercase = dict(self.forward_default_kwargs )
        lowercase = kwargs.pop("""num_inference_steps""" , __lowerCAmelCase )
        for scheduler_class in self.scheduler_classes:
            lowercase = self.get_scheduler_config()
            lowercase = scheduler_class(**__lowerCAmelCase )
            lowercase = self.dummy_sample
            lowercase = 0.1 * sample
            if num_inference_steps is not None and hasattr(__lowerCAmelCase , """set_timesteps""" ):
                scheduler.set_timesteps(__lowerCAmelCase )
            elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , """set_timesteps""" ):
                lowercase = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
            lowercase = dummy_past_residuals[:]
            lowercase = scheduler.timesteps[5]
            lowercase = scheduler.timesteps[6]
            lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
            lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def A__ ( self ):
        """Config sweep over the number of training timesteps."""
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=__lowerCAmelCase , time_step=__lowerCAmelCase )

    def A__ ( self ):
        """Forward sweep over (timestep, num_inference_steps) pairs."""
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=__lowerCAmelCase )

    def A__ ( self ):
        """The full-loop mean must match the recorded reference value."""
        lowercase = self.full_loop()
        lowercase = torch.mean(torch.abs(__lowerCAmelCase ) )
        assert abs(result_mean.item() - 254_0529 ) < 10
| 197
| 0
|
from __future__ import annotations
from typing import Generic, TypeVar
_snake_case = TypeVar("T")
class UpperCAmelCase_ ( Generic[T]):
    """A node in a disjoint-set (union-find) forest."""

    def __init__( self, __a):
        """Create a singleton set node holding the payload ``__a``.

        BUG FIX: the previous revision assigned these values to throwaway
        locals, so the ``data``/``parent``/``rank`` attributes read by the
        tree operations in this module were never set.
        """
        self.data = __a
        # A root node is its own parent; ``rank`` is an upper bound on the
        # height of the subtree rooted here (used by union by rank).
        self.parent = self
        self.rank = 0
class UpperCAmelCase_ ( Generic[T]):
    """Disjoint-set forest with path compression and union by rank.

    BUG FIXES: all four operations previously shared one duplicated method
    name (so only the last survived at runtime), ``link`` declared two
    parameters with the same name (a syntax error), and the backing ``map``
    attribute was assigned to a throwaway local.  The method names are
    restored from the call sites elsewhere in this module.
    """

    def __init__( self):
        # element -> its tree node.
        # NOTE(review): ``DisjointSetTreeNode`` is not bound under that name in
        # this revision of the module (the node class above uses a different
        # identifier); confirm the intended reference upstream.
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set( self, __a):
        """Create a new singleton set containing ``__a``."""
        self.map[__a] = DisjointSetTreeNode(__a)

    def find_set( self, __a):
        """Return the root node of the set containing ``__a`` (path compression)."""
        elem_ref = self.map[__a]
        if elem_ref != elem_ref.parent:
            # Point directly at the root so future lookups are O(1) amortized.
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link( self, nodea, nodeb):
        """Attach the lower-rank root beneath the higher-rank root."""
        if nodea.rank > nodeb.rank:
            nodeb.parent = nodea
        else:
            nodea.parent = nodeb
            if nodea.rank == nodeb.rank:
                nodeb.rank += 1

    def union( self, __a, __b):
        """Merge the sets containing the two given elements."""
        self.link(self.find_set(__a), self.find_set(__b))

    # Backward-compatible alias: the previous revision exposed only its last
    # (identically named) method under this identifier.
    snake_case__ = union
class UpperCAmelCase_ ( Generic[T]):
    """Undirected weighted graph with a Kruskal minimum-spanning-tree query.

    BUG FIXES: the mutators previously shared one duplicated method name,
    ``add_edge`` declared duplicate parameter names (a syntax error), the
    adjacency writes were lost in throwaway locals, and the edge-sort key
    referenced an undefined name.  Method names are restored from the call
    sites inside the MST routine.
    """

    def __init__( self):
        # node -> {neighbour: edge weight}
        self.connections: dict[T, dict[T, int]] = {}

    def add_node( self, __a):
        """Register ``__a`` with an empty adjacency map if it is unknown."""
        if __a not in self.connections:
            self.connections[__a] = {}

    def add_edge( self, __start, __end, __weight):
        """Insert an undirected edge of the given weight (stored both ways)."""
        self.add_node(__start)
        self.add_node(__end)
        self.connections[__start][__end] = __weight
        self.connections[__end][__start] = __weight

    def kruskal( self):
        """Return the minimum spanning tree as a new graph of the same type.

        NOTE(review): the method name is reconstructed (no in-module caller),
        and ``DisjointSetTree`` is not bound under that name in this revision
        of the module; confirm the intended reference upstream.
        """
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                # Record each undirected edge once.
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        # BUG FIX: the key lambda previously read an undefined name.
        edges.sort(key=lambda edge: edge[2])
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # MST generation
        num_edges = 0
        index = 0
        # BUG FIX: instantiate this class (the previous reference was unbound).
        mst = type(self)()
        while num_edges < len(self.connections) - 1:
            start, end, weight = edges[index]
            index += 1
            if disjoint_set.find_set(start) != disjoint_set.find_set(end):
                num_edges += 1
                mst.add_edge(start, end, weight)
                disjoint_set.union(start, end)
        return mst

    # Backward-compatible alias: the previous revision exposed only its last
    # (identically named) method under this identifier.
    snake_case__ = kruskal
| 500
|
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for DownBlock2D (block type 'down')."""

    # attr names assumed per UNetBlockTesterMixin (block_class/block_type) — confirm
    block_class = DownBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        # Reference slice from a fixed-seed forward pass of the block.
        expected_slice = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for ResnetDownsampleBlock2D (block type 'down')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = ResnetDownsampleBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for AttnDownBlock2D (block type 'down')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = AttnDownBlockaD  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for CrossAttnDownBlock2D (block type 'down')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = CrossAttnDownBlockaD  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # NOTE(review): key assumed to be cross_attention_dim — confirm
        init_dict["cross_attention_dim"] = 3_2
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for SimpleCrossAttnDownBlock2D (block type 'down')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = SimpleCrossAttnDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        # This block consumes encoder hidden states.
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # NOTE(review): key assumed to be cross_attention_dim — confirm
        init_dict["cross_attention_dim"] = 3_2
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
    def test_output(self):
        expected_slice = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for SkipDownBlock2D (block type 'down')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = SkipDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        # Skip blocks additionally take a skip sample.
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for AttnSkipDownBlock2D (block type 'down')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = AttnSkipDownBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for DownEncoderBlock2D (block type 'down')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = DownEncoderBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        # Encoder blocks take no time embedding.
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            '''in_channels''': 3_2,
            '''out_channels''': 3_2,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for AttnDownEncoderBlock2D (block type 'down')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = AttnDownEncoderBlockaD  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        # Encoder blocks take no time embedding.
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            '''in_channels''': 3_2,
            '''out_channels''': 3_2,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for UNetMidBlock2D (block type 'mid')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = UNetMidBlockaD  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            '''in_channels''': 3_2,
            '''temb_channels''': 1_2_8,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for UNetMidBlock2DCrossAttn (block type 'mid')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = UNetMidBlockaDCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # NOTE(review): key assumed to be cross_attention_dim — confirm
        init_dict["cross_attention_dim"] = 3_2
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for UNetMidBlock2DSimpleCrossAttn (block type 'mid')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = UNetMidBlockaDSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # NOTE(review): key assumed to be cross_attention_dim — confirm
        init_dict["cross_attention_dim"] = 3_2
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for UpBlock2D (block type 'up')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = UpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        # Up blocks take the tuple of residual hidden states from the down path.
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for ResnetUpsampleBlock2D (block type 'up')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = ResnetUpsampleBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for CrossAttnUpBlock2D (block type 'up')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = CrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # NOTE(review): key assumed to be cross_attention_dim — confirm
        init_dict["cross_attention_dim"] = 3_2
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for SimpleCrossAttnUpBlock2D (block type 'up')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = SimpleCrossAttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        # Needs both the down-path residuals and encoder hidden states.
        return super().get_dummy_input(include_res_hidden_states_tuple=True , include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # NOTE(review): key assumed to be cross_attention_dim — confirm
        init_dict["cross_attention_dim"] = 3_2
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for AttnUpBlock2D (block type 'up')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = AttnUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
    def test_output(self):
        expected_slice = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for SkipUpBlock2D (block type 'up')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = SkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for AttnSkipUpBlock2D (block type 'up')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = AttnSkipUpBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for UpDecoderBlock2D (block type 'up')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = UpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        # Decoder blocks take no time embedding.
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {'''in_channels''': 3_2, '''out_channels''': 3_2}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
        super().test_output(expected_slice)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
    """Output test for AttnUpDecoderBlock2D (block type 'up')."""

    # attr names assumed per UNetBlockTesterMixin — confirm
    block_class = AttnUpDecoderBlockaD  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {'''in_channels''': 3_2, '''out_channels''': 3_2}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
        super().test_output(expected_slice)
| 315
| 0
|
from abc import ABC, abstractmethod
from typing import List, Optional
class UpperCAmelCase__( lowerCamelCase ):
    """Abstract interface for token-level generation constraints.

    A constraint is advanced one token id at a time. Subclasses implement
    ``advance``/``does_advance``/``update``/``reset``/``remaining``/``copy``
    (names restored from the call sites inside ``test`` below; the
    obfuscated file had every method named identically).
    """

    def __init__(self):
        # Run the self-consistency check so a broken subclass fails fast.
        self.test()

    def test(self):
        """Drive the constraint to completion once to validate the subclass."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.')
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 1_00_00:
                raise Exception('update() does not fulfill the constraint.')
        if self.remaining() != 0:
            raise Exception('Custom Constraint is not defined correctly.')

    @abstractmethod
    def advance(self):
        """Return the token id(s) that would advance this constraint."""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')

    @abstractmethod
    def does_advance(self, token_id):
        """Return True if ``token_id`` makes progress on this constraint."""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')

    @abstractmethod
    def update(self, token_id):
        """Consume ``token_id``; return ``(stepped, completed, reset)``."""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')

    @abstractmethod
    def reset(self):
        """Forget all progress made so far."""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')

    @abstractmethod
    def remaining(self):
        """Return how many steps are left until completion."""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')

    @abstractmethod
    def copy(self, stateful=False):
        """Return a copy; keep progress state when ``stateful`` is True."""
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''')
class UpperCAmelCase__( lowerCamelCase ):
    """Constraint that forces the exact token sequence ``token_ids`` to appear.

    Progress is tracked by ``fulfilled_idx``, the index of the last matched
    token. (The obfuscated original dropped all attribute assignments and
    referenced the undefined name ``PhrasalConstraint`` in ``copy``.)
    """

    def __init__(self, token_ids):
        super().__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''')
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''')
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        """Return the next required token id, or None when done."""
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id):
        """True iff ``token_id`` is the next token of the phrase."""
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}''')
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        """Consume ``token_id``; return ``(stepped, completed, reset)``."""
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}''')
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        """Forget all progress."""
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        """Number of tokens still needed to finish the phrase."""
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        """Return a copy, optionally carrying over the progress state."""
        # self.__class__ instead of a hard-coded class name (the original
        # referenced an undefined identifier here).
        new_constraint = self.__class__(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class UpperCAmelCase__:
    """Trie over several token-id sequences, used for disjunctive constraints.

    Each node is a plain dict mapping token id -> child node; a node with no
    children is a leaf (a complete sequence). ``no_subsets`` rejects inputs
    where one sequence is a strict prefix of another.
    """

    def __init__(self, nested_token_ids, no_subsets=True):
        # Longest sequence length = maximum depth of the trie.
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
                f''' {nested_token_ids}.''')
        self.trie = root

    def next_tokens(self, current_seq):
        """Return the token ids that may follow ``current_seq`` in the trie."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        """True iff ``current_seq`` is a complete stored sequence."""
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        """Recursively count the leaves below ``root``."""
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """True iff some sequence is a prefix of another (leaf count < list count)."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class UpperCAmelCase__( lowerCamelCase ):
    """Constraint satisfied when any one of several token sequences is produced.

    Backed by a trie over ``nested_token_ids``; ``current_seq`` is the path
    matched so far. (The obfuscated original dropped all attribute
    assignments and referenced undefined class names.)
    """

    def __init__(self, nested_token_ids):
        super().__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''')
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''')
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids):
            raise ValueError(
                f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''')
        # NOTE(review): the trie class defined just above had its name
        # mangled; `DisjunctiveTrie` must resolve to it — confirm.
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        """Return the list of token ids that can extend the current match."""
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id):
        """True iff ``token_id`` extends the currently matched prefix."""
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}''')
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id):
        """Consume ``token_id``; return ``(stepped, completed, reset)``."""
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}''')
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        """Forget the matched prefix."""
        self.completed = False
        self.current_seq = []

    def remaining(self):
        """Upper bound on tokens still needed (0 once completed)."""
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        """Return a copy, optionally carrying over the progress state."""
        new_constraint = self.__class__(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class UpperCAmelCase__:
    """Tracks progress of a beam through a list of constraints.

    Constraints are partitioned into ``complete_constraints``,
    ``pending_constraints`` and at most one ``inprogress_constraint``.
    (Method names restored from the internal call sites; the obfuscated
    original also dropped every attribute assignment and tuple unpack.)
    """

    def __init__(self, constraints):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        """Reset to the initial state: nothing complete, nothing in progress."""
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        """Score this state: full credit per completed constraint plus partial progress."""
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """Collect all token ids that would advance some constraint, or None."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        """Re-derive the state from scratch by replaying ``token_ids``."""
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        """Feed one ``token_id`` into the state; return ``(complete, stepped)``."""
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''')
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            '`constraint.update(token_id)` is not yielding incremental progress, '
                            'even though `constraint.does_advance(token_id)` is true.')
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        """Return a copy; with ``stateful`` the per-constraint progress is kept."""
        new_state = self.__class__(self.constraints)  # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
| 642
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : List[Any] = None
A : Optional[int] = None
@property
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size'))
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate'))
self.assertTrue(hasattr(lowerCAmelCase , 'padding_value'))
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name])))
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
lowercase__ = self.feat_extract_tester.seq_length_diff
lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff
lowercase__ = self.feat_extract_tester.min_seq_length
lowercase__ = self.feat_extract_tester.batch_size
lowercase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest')
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1]))
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
lowercase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCAmelCase):
feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np')
lowercase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
self.assertTrue(len(input_a[0]) == pad_min_length)
self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff)
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0])))
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size)
# test padding for `pad_to_multiple_of` for List[int] + numpy
lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase)
lowercase__ = input_a[input_name]
lowercase__ = feat_extract.pad(
lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , )
lowercase__ = input_a[input_name]
self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length))
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size)
# Check padding value is correct
lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[1])[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
< 1E-3)
self.assertTrue(
abs(
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
< 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3)
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
< 1E-3)
    def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict=False) -> str:
        """Exercise `feat_extract.pad(..., truncation=...)`: truncation to the
        smallest/middle input length for list and numpy inputs, the error cases
        where `truncation=True` lacks a usable `max_length`, and interaction
        with `pad_to_multiple_of`.

        NOTE(review): many names below (`input`, `length`, `input_a`,
        `expected_length`, `pad_to_multiple_of`, the repeated `lowerCAmelCase`
        parameters) do not resolve as written — they look like artifacts of a
        mechanical rename; confirm against the original test before relying on
        this method.
        """
        def _inputs_have_equal_length(lowerCAmelCase : int):
            # True when every slice has the same length as the first one.
            lowercase__ = len(input[0])
            for input_slice in input[1:]:
                if len(lowerCAmelCase) != length:
                    return False
            return True
        def _inputs_are_equal(lowerCAmelCase : str , lowerCAmelCase : Optional[Any]):
            # Element-wise comparison of two batches within a small tolerance.
            # NOTE(review): the duplicated parameter name is a SyntaxError.
            if len(lowerCAmelCase) != len(lowerCAmelCase):
                return False
            for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
                if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
                    return False
            return True
        lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
        lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase)
        lowercase__ = feat_extract.model_input_names[0]
        lowercase__ = BatchFeature({input_name: speech_inputs})
        # truncate to smallest
        lowercase__ = feat_extract.pad(
            lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=lowerCAmelCase)
        lowercase__ = input_a[input_name]
        lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]))
        lowercase__ = input_a[input_name]
        self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
        self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
        # truncate to smallest with np
        lowercase__ = feat_extract.pad(
            lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=lowerCAmelCase , )
        lowercase__ = input_a[input_name]
        lowercase__ = feat_extract.pad(
            lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np')
        lowercase__ = input_a[input_name]
        self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
        self.assertTrue(input_a.shape[1] == len(speech_inputs[0]))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
        # truncate to middle
        lowercase__ = feat_extract.pad(
            lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase , return_tensors='np' , )
        lowercase__ = input_a[input_name]
        lowercase__ = feat_extract.pad(
            lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase)
        lowercase__ = input_a[input_name]
        lowercase__ = feat_extract.pad(
            lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np')
        lowercase__ = input_a[input_name]
        self.assertTrue(input_a.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
        self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
        self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
        self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1]))
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(lowerCAmelCase):
            feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase)[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(lowerCAmelCase):
            feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(lowerCAmelCase):
            feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name]
        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(lowerCAmelCase):
            feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase)[input_name]
        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        lowercase__ = 12
        lowercase__ = feat_extract.pad(
            lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , )
        lowercase__ = input_a[input_name]
        lowercase__ = feat_extract.pad(
            lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , )
        lowercase__ = input_a[input_name]
        # retrieve expected_length as multiple of pad_to_multiple_of
        lowercase__ = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            lowercase__ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
        self.assertTrue(len(input_a[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(lowerCAmelCase))
        self.assertFalse(_inputs_have_equal_length(lowerCAmelCase))
    def UpperCAmelCase ( self : List[str]) -> List[str]:
        """Run the shared padding checks with list inputs.

        NOTE(review): `lowerCAmelCase` is not defined in this scope — likely a
        rename artifact for a boolean flag (e.g. False); confirm upstream.
        """
        self._check_padding(numpify=lowerCAmelCase)
    def UpperCAmelCase ( self : Any) -> Optional[Any]:
        """Run the shared padding checks with numpified inputs.

        NOTE(review): `lowerCAmelCase` is undefined here — presumably True in
        the original; verify against the upstream test.
        """
        self._check_padding(numpify=lowerCAmelCase)
    def UpperCAmelCase ( self : List[Any]) -> int:
        """Run the shared truncation checks with list inputs.

        NOTE(review): `lowerCAmelCase` is undefined in this scope — rename
        artifact for a boolean flag; confirm upstream.
        """
        self._check_truncation(numpify=lowerCAmelCase)
    def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
        """Run the shared truncation checks with numpified inputs.

        NOTE(review): `lowerCAmelCase` is undefined in this scope — rename
        artifact for a boolean flag; confirm upstream.
        """
        self._check_truncation(numpify=lowerCAmelCase)
    @require_torch
    def UpperCAmelCase ( self : Dict) -> List[str]:
        """Padded 'np' and 'pt' outputs must agree numerically (sums compared
        as float within 1e-2).

        NOTE(review): `feat_extract`, `input_name`, `speech_inputs`,
        `input_np`, `input_pt` are read but never bound under those names —
        rename artifacts; verify against the original.
        """
        lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
        lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
        lowercase__ = feat_extract.model_input_names[0]
        lowercase__ = BatchFeature({input_name: speech_inputs})
        lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
        lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='pt')[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2)
    @require_tf
    def UpperCAmelCase ( self : str) -> str:
        """Padded 'np' and 'tf' outputs must agree numerically (sums compared
        as float within 1e-2).

        NOTE(review): same unresolved-name artifacts as the torch variant
        above; confirm against the original test.
        """
        lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
        lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
        lowercase__ = feat_extract.model_input_names[0]
        lowercase__ = BatchFeature({input_name: speech_inputs})
        lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name]
        lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='tf')[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1E-2)
    def UpperCAmelCase ( self : Optional[Any]) -> Tuple:
        """With `return_attention_mask` enabled, `pad` must return a mask whose
        shape matches the padded batch and whose per-row sums equal the
        original input lengths.

        NOTE(review): `speech_inputs`, `feat_extract`, `processed` are read but
        never bound under those names — rename artifacts; verify upstream.
        """
        lowercase__ = self.feat_extract_dict
        lowercase__ = True
        lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
        lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
        lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
        lowercase__ = feat_extract.model_input_names[0]
        lowercase__ = BatchFeature({input_name: speech_inputs})
        lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')
        self.assertIn('attention_mask' , lowerCAmelCase)
        self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist() , lowerCAmelCase)
    def UpperCAmelCase ( self : str) -> Tuple:
        """Attention mask with truncation to the shortest input: the mask is
        (batch, max_length) and every row sums to max_length.

        NOTE(review): `speech_inputs`, `feat_extract`, `max_length`,
        `processed_pad` are read but never bound under those names — rename
        artifacts; verify against the original test.
        """
        lowercase__ = self.feat_extract_dict
        lowercase__ = True
        lowercase__ = self.feature_extraction_class(**lowerCAmelCase)
        lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
        lowercase__ = [len(lowerCAmelCase) for x in speech_inputs]
        lowercase__ = feat_extract.model_input_names[0]
        lowercase__ = BatchFeature({input_name: speech_inputs})
        lowercase__ = min(lowerCAmelCase)
        lowercase__ = feat_extract.pad(
            lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='np')
        self.assertIn('attention_mask' , lowerCAmelCase)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length])
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
| 642
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __SCREAMING_SNAKE_CASE( a_ ):
    """Agent tool wrapping openai/whisper-base for speech-to-text.

    Pipeline: the first method encodes raw audio into Whisper input features,
    the second runs generation, the third decodes generated ids to text.
    NOTE(review): the base class `a_` and the three identically named methods
    are obfuscation artifacts (the later defs shadow the earlier ones) —
    confirm against the original tool class.
    """
    # Checkpoint and tool metadata consumed by the agents framework.
    _UpperCAmelCase = "openai/whisper-base"
    _UpperCAmelCase = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    _UpperCAmelCase = "transcriber"
    _UpperCAmelCase = WhisperProcessor
    _UpperCAmelCase = WhisperForConditionalGeneration
    _UpperCAmelCase = ["audio"]
    _UpperCAmelCase = ["text"]
    def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Union[str, Any] ) -> Dict:
        """Convert raw audio into Whisper input features ('pt' tensors)."""
        return self.pre_processor(UpperCamelCase , return_tensors='pt' ).input_features
    def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Optional[int] ) -> Optional[int]:
        """Run autoregressive generation on the encoded features."""
        return self.model.generate(inputs=UpperCamelCase )
    def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: List[str] ) -> str:
        """Decode the first generated sequence to plain text.

        NOTE(review): `skip_special_tokens=UpperCamelCase` passes the outputs
        where a boolean is expected — looks like a rename artifact for True.
        """
        return self.pre_processor.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )[0]
| 328
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __SCREAMING_SNAKE_CASE( a_ ):
    """Speech-transcription tool backed by openai/whisper-base.

    Encodes audio to input features, generates token ids, and decodes them to
    text. NOTE(review): the base class `a_` is an obfuscated reference, and
    all three methods share one name (later defs shadow earlier ones) —
    verify against the original implementation.
    """
    # Checkpoint and metadata used by the agents framework.
    _UpperCAmelCase = "openai/whisper-base"
    _UpperCAmelCase = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    _UpperCAmelCase = "transcriber"
    _UpperCAmelCase = WhisperProcessor
    _UpperCAmelCase = WhisperForConditionalGeneration
    _UpperCAmelCase = ["audio"]
    _UpperCAmelCase = ["text"]
    def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Union[str, Any] ) -> Dict:
        """Preprocess raw audio into Whisper input features ('pt' tensors)."""
        return self.pre_processor(UpperCamelCase , return_tensors='pt' ).input_features
    def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: Optional[int] ) -> Optional[int]:
        """Generate token ids from the encoded features."""
        return self.model.generate(inputs=UpperCamelCase )
    def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: List[str] ) -> str:
        """Decode the first generated sequence to text.

        NOTE(review): `skip_special_tokens=UpperCamelCase` passes the outputs
        where a boolean is expected — rename artifact; original likely True.
        """
        return self.pre_processor.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )[0]
| 328
| 1
|
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class lowerCAmelCase ( unittest.TestCase ):
    """Integration tests for BetterTransformer <-> vanilla model conversion.

    NOTE(review): the name `a__` used throughout does not resolve in this
    scope — it is an artifact of a mechanical rename of several distinct
    locals; verify against the original test file.
    """
    def _A ( self : Dict ):
        """Round-trip: convert to BetterTransformer, generate, reverse, save,
        reload, and check the reloaded vanilla model generates the same
        output."""
        lowerCAmelCase__ : Dict = "hf-internal-testing/tiny-random-t5"
        lowerCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(a__ )
        lowerCAmelCase__ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(a__ )
        lowerCAmelCase__ : int = tokenizer("This is me" , return_tensors="pt" )
        lowerCAmelCase__ : int = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        lowerCAmelCase__ : Optional[int] = model.generate(**a__ )
        lowerCAmelCase__ : Optional[int] = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(a__ )
            lowerCAmelCase__ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(a__ )
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            lowerCAmelCase__ : List[Any] = model_reloaded.generate(**a__ )
            self.assertTrue(torch.allclose(a__ , a__ ) )
    def _A ( self : str ):
        """Saving a BetterTransformer-converted model directly must raise;
        after `reverse_bettertransformer()` saving succeeds."""
        lowerCAmelCase__ : Union[str, Any] = "hf-internal-testing/tiny-random-t5"
        lowerCAmelCase__ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(a__ )
        lowerCAmelCase__ : int = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(a__ ):
                model.save_pretrained(a__ )
            lowerCAmelCase__ : str = model.reverse_bettertransformer()
            model.save_pretrained(a__ )
| 706
|
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
# NOTE(review): one obfuscated name is rebound three times here; the rest of
# the module reads ``logger``, ``name_width`` and ``qname_width``, which these
# three assignments were presumably meant to define — confirm and restore the
# distinct names, otherwise those references are NameErrors at runtime.
snake_case = logging.getLogger(__name__)
snake_case = 50 # max width of layer names
snake_case = 70 # max width of quantizer names
def UpperCAmelCase_ ( lowerCamelCase_ ):
    """Register quant_trainer's command-line options on the given parser.

    Bug fixes: the body referenced an undefined global ``parser`` (the
    parameter was never used), called ``add_argument`` on an undefined
    ``group``, and passed the parameter itself as every ``type=``/``default=``
    value. Restored proper argparse usage with concrete types/defaults.

    Args:
        lowerCamelCase_: an ``argparse.ArgumentParser`` (or compatible object
            exposing ``add_argument_group``) to extend in place.
    """
    group = lowerCamelCase_.add_argument_group("quant_trainer arguments" )
    group.add_argument("--wprec" , type=int , default=8 , help="weight precision" )
    group.add_argument("--aprec" , type=int , default=8 , help="activation precision" )
    group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
    group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
    group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
    group.add_argument("--quant-disable-keyword" , type=str , nargs="+" , help="disable quantizers by keyword" )
    group.add_argument("--quant-disable-layer-module" , type=str , help="disable quantizers by keyword under layer." )
    group.add_argument("--quant-enable-layer-module" , type=str , help="enable quantizers by keyword under layer" )
    group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
    group.add_argument("--percentile" , default=None , type=float , help="percentile for PercentileCalibrator" )
    group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
    group.add_argument("--clip-gelu" , metavar="N" , type=float , help="clip gelu output maximum value to N" )
    group.add_argument(
        "--recalibrate-weights" , action="store_true" , help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ) , )
def UpperCAmelCase_ ( lowerCamelCase_ ):
    """Install default input/weight QuantDescriptors on quant_nn.QuantLinear.

    Bug fixes: the body read an undefined global ``args`` (the parameter was
    never used) and passed the parameter itself as the calib method and as the
    descriptors; bind the namespace and thread the locals through properly.

    Args:
        lowerCamelCase_: parsed CLI namespace with ``calibrator``,
            ``percentile``, ``aprec``, ``wprec``, ``quant_per_tensor``.

    Raises:
        ValueError: for an unknown calibrator, or a percentile calibrator
            without ``--percentile``.
    """
    args = lowerCamelCase_
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator" )
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
    # Activations quantized with the chosen calibration method; weights per
    # output channel (axis 0) unless --quant-per-tensor was given.
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def UpperCAmelCase_ ( model , args , calib=False , eval=False ):
    """Apply the CLI-selected quantizer configuration to *model*.

    Bug fix: the obfuscated signature declared the same parameter name four
    times, which is a SyntaxError in Python; restored distinct parameters.
    The body already read ``calib``/``args`` as free names, so those bindings
    are grounded; ``eval`` is kept for signature compatibility (unused here;
    name shadows the builtin but callers may pass it by keyword).

    Args:
        model: the model whose quantizers are configured.
        args: parsed quant_trainer CLI namespace.
        calib: when True, skip the disable/recalibrate steps (calibration run).
        eval: unused flag kept for interface compatibility.
    """
    logger.info("Configuring Model for Quantization" )
    logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ["embeddings"] , which="weight" , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [""] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R"layer.\d+." + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            # NOTE(review): enable == _disabled=False; confirm against upstream.
            set_quantizer_by_name(model , [R"layer.\d+." + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
    if args.clip_gelu:
        clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def UpperCAmelCase_ ( lowerCamelCase_ ):
    """Switch every ``*_quantizer`` module in the model into calibration mode.

    Bug fix: the body iterated an undefined global ``model`` instead of the
    parameter; bind the parameter explicitly.

    Args:
        lowerCamelCase_: model whose ``named_modules()`` are scanned.
    """
    model = lowerCamelCase_
    logger.info("Enabling Calibration" )
    for name, module in model.named_modules():
        if name.endswith("_quantizer" ):
            if module._calibrator is not None:
                # Collect statistics instead of quantizing.
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f'''{name:80}: {module}''' )
def UpperCAmelCase_ ( model , args ):
    """Finish calibration: load the computed amax values and re-enable quant.

    Bug fix: the obfuscated signature declared the same parameter name twice
    (a SyntaxError); restored the two distinct parameters the body already
    reads as free names (``model``, ``args``).

    Args:
        model: model whose ``*_quantizer`` modules were calibrated.
        args: CLI namespace; ``args.percentile`` is used for histogram
            calibrators.
    """
    logger.info("Loading calibrated amax" )
    for name, module in model.named_modules():
        if name.endswith("_quantizer" ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile" , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model )
def UpperCAmelCase_ ( model , args ):
    """Share one amax across the q/k/v quantizers of each self-attention block.

    Bug fix: both the outer signature and the inner helper declared duplicate
    parameter names (SyntaxErrors); restored distinct names. The locals
    ``q``/``k``/``v``/``amax`` are grounded by the log f-string that reads
    them.

    Args:
        model: model whose ``.attention.self`` blocks are fused.
        args: CLI namespace; ``quant_per_tensor`` also fuses weight quantizers.
    """
    def fusea(qq , qk , qv ):
        # Skip the whole triple if any quantizer has no calibrated amax yet.
        for mod in [qq, qk, qv]:
            if not hasattr(mod , "_amax" ):
                print(" WARNING: NO AMAX BUFFER" )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
    for name, mod in model.named_modules():
        if name.endswith(".attention.self" ):
            logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
            fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def UpperCAmelCase_ ( model , maxval ):
    """Clamp the input-quantizer amax of every FFN ``output.dense`` layer.

    Bug fix: the obfuscated signature declared the same parameter name twice
    (a SyntaxError). The clamp bound is the second argument — the configure
    function calls this with ``args.clip_gelu``. The locals ``amax_init`` /
    ``amax`` are grounded by the log f-string that reads them.

    Args:
        model: model to scan.
        maxval: upper bound applied to the amax value.
    """
    for name, mod in model.named_modules():
        # Only the FFN output projection, not the attention output projection.
        if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def UpperCAmelCase_ ( lowerCamelCase_ ):
    """Expand a per-tensor amax to a per-output-channel vector.

    Bug fixes: the body iterated an undefined global ``model``, tested
    ``hasattr`` on the parameter instead of each submodule, and dropped the
    write-back of the expanded amax that the print statement reports.

    Args:
        lowerCamelCase_: model whose axis-quantized weight quantizers are
            expanded.
    """
    model = lowerCamelCase_
    for name, mod in model.named_modules():
        if hasattr(mod , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            # Broadcast the scalar amax to one entry per output channel.
            mod._weight_quantizer._amax = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
            print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def UpperCAmelCase_ ( lowerCamelCase_ ):
    """Recompute each weight quantizer's amax from the current weights.

    Bug fixes: the body iterated an undefined global ``model``; tested
    ``mod.weight_quantizer`` (an attribute that never exists — every other use
    here is ``_weight_quantizer``); passed the model as the ``keepdims`` flag;
    printed a template string without the f-prefix; and dropped the write-back
    of the recomputed amax that the log line reports.

    Args:
        lowerCamelCase_: model whose weight-quantizer amaxes are recomputed.
    """
    model = lowerCamelCase_
    for name, mod in model.named_modules():
        if hasattr(mod , "_weight_quantizer" ):
            if not hasattr(mod._weight_quantizer , "_amax" ):
                print(f'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            # keepdims=True keeps the amax broadcastable against the weight.
            amax = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
            logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
            mod._weight_quantizer._amax = amax
def UpperCAmelCase_ ( model , name_width=25 , line_width=180 , ignore=None ):
    """Log one line per weighted module showing its quantizer configuration.

    Bug fix: the obfuscated signature repeated one parameter name four times
    (a SyntaxError); restored distinct parameters with the original defaults
    (25 / 180 / None) visible in the obfuscated source.

    Args:
        model: module tree to summarize.
        name_width: minimum name column width (recomputed from the model).
        line_width: soft wrap limit for a single log line.
        ignore: a type or name-substring — or list of them — to skip.
    """
    if ignore is None:
        ignore = []
    elif not isinstance(ignore , list ):
        ignore = [ignore]
    # Widen the name column to the longest weighted-module name.
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , "weight" ):
            continue
        name_width = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        input_q = getattr(mod , "_input_quantizer" , None )
        weight_q = getattr(mod , "_weight_quantizer" , None )
        if not hasattr(mod , "weight" ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = f'''Act:{input_q.extra_repr()}'''
        wgt_str = f'''Wgt:{weight_q.extra_repr()}'''
        s = f'''{name:{name_width}} {act_str} {wgt_str}'''
        if len(s ) <= line_width:
            logger.info(s )
        else:
            # Too long for one line: split name+activation and weight lines.
            logger.info(f'''{name:{name_width}} {act_str}''' )
            logger.info(f'''{' ':{name_width}} {wgt_str}''' )
def UpperCAmelCase_ ( lowerCamelCase_ ):
    """Print every TensorQuantizer in the model and a total count.

    Bug fixes: the body iterated an undefined global ``model`` and ran
    ``isinstance`` on the parameter rather than each submodule.

    Args:
        lowerCamelCase_: model whose modules are scanned.
    """
    model = lowerCamelCase_
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
            print(f'''{name:80} {mod}''' )
            count += 1
    print(f'''{count} TensorQuantizers found in model''' )
def UpperCAmelCase_ ( name , mod , quantizer , k , v ):
    """Set one attribute on ``mod.<quantizer>`` if that quantizer exists.

    Bug fix: the obfuscated signature declared the same parameter name five
    times (a SyntaxError); restored five distinct parameters matching how the
    body uses them (getattr target, attribute key, new value).

    Args:
        name: module name, used only in the warning message.
        mod: module that may carry the quantizer attribute.
        quantizer: attribute name, e.g. "_input_quantizer".
        k: attribute on the quantizer to set (must already exist).
        v: new value for that attribute.
    """
    quantizer_mod = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        # Only overwrite attributes the quantizer already defines.
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
    else:
        logger.warning(f'''{name} has no {quantizer}''' )
def UpperCAmelCase_ ( name , mod , which="both" , **kwargs ):
    """Apply each keyword as an attribute change on mod's quantizer(s).

    Bug fix: the obfuscated signature declared the same parameter name for
    both positionals and the kwargs dict (a SyntaxError); restored distinct
    parameters. The ``which`` default "both" is visible in the source.

    Args:
        name: module name (for logging).
        mod: module carrying ``_input_quantizer`` / ``_weight_quantizer``.
        which: "input", "weight" or "both".
        **kwargs: attribute/value pairs forwarded to ``set_quantizer``.
    """
    s = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
    for k, v in kwargs.items():
        s += f''' {k}={v}'''
        if which in ["input", "both"]:
            set_quantizer(name , mod , "_input_quantizer" , k , v )
        if which in ["weight", "both"]:
            set_quantizer(name , mod , "_weight_quantizer" , k , v )
    logger.info(s )
def UpperCAmelCase_ ( model , names , **kwargs ):
    """Apply quantizer attribute changes to modules whose name matches.

    Bug fix: the obfuscated signature reused one parameter name for both
    positionals and the kwargs dict (a SyntaxError); restored distinct
    parameters matching how the body uses them.

    Args:
        model: model whose modules are matched by regex.
        names: iterable of regex patterns searched against module names.
        **kwargs: attribute/value pairs to set on matching quantizers.
    """
    for name, mod in model.named_modules():
        if hasattr(mod , "_input_quantizer" ) or hasattr(mod , "_weight_quantizer" ):
            # Weighted module with attached quantizers: delegate per quantizer.
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith("_quantizer" ):
            # Bare TensorQuantizer module: set the attributes directly.
            for n in names:
                if re.search(n , name ):
                    s = f'''Warning: changing {name:{name_width}}'''
                    for k, v in kwargs.items():
                        s += f''' {k}={v}'''
                        setattr(mod , k , v )
                    logger.info(s )
| 568
| 0
|
"""simple docstring"""
def __A ( a_ :int) -> bool:
    """Return True if *a_* is an even integer, else False.

    Bug fix: the body tested an undefined name ``number``; use the parameter.
    The check masks the lowest bit, which is 0 for every even integer
    (including negatives and zero).
    """
    return a_ & 1 == 0
if __name__ == "__main__":
    # Run this module's doctests when executed directly as a script.
    import doctest

    doctest.testmod()
| 52
|
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
# Example-script logging setup: timestamped INFO-level messages.
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Module-level logger. NOTE(review): obfuscated name ``A`` — later code in
# this script calls ``logger.info(...)``, so this was presumably named
# ``logger``; confirm and reconcile.
A = logging.getLogger(__name__)
def __A ( out , labels ):
    """Return the number of rows of *out* whose argmax equals *labels*.

    Bug fixes: the obfuscated signature declared the parameter name ``a_``
    twice (a SyntaxError) and the body read undefined globals; restored two
    distinct parameters matching the local names the body already used.

    Args:
        out: (n, k) array of per-class scores.
        labels: (n,) array of integer class labels.

    Returns:
        Integer count of correct argmax predictions (numpy scalar).
    """
    outputs = np.argmax(out , axis=1)
    return np.sum(outputs == labels)
def __A ( a_ :Any) -> list:
    """Load a ROCStories-style CSV and return a list of
    (story, continuation1, continuation2, zero_based_label) tuples.

    Bug fixes: the original body passed the *path string* — not the open file
    handle — to ``csv.reader``, ``next`` and ``tqdm``, so it iterated the
    path's characters instead of CSV rows. The return annotation claimed
    ``str`` but a list is returned.

    Args:
        a_: filesystem path to the CSV file (utf-8 encoded).
    """
    with open(a_ , encoding='''utf_8''') as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first (header) line
        for line in tqdm(reader):
            # Columns 1-4 are the story sentences, 5/6 the two candidate
            # endings, and the last column the 1-based correct-ending label.
            output.append((''' '''.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def __A ( a_ :Dict , a_ :str , a_ :str , a_ :List[Any] , a_ :Tuple , a_ :List[Any]) -> Any:
    """Build per-dataset tensors (input_ids, mc_token_ids, lm_labels,
    mc_labels) for the double-heads GPT model.

    NOTE(review): the six parameters all share the name ``a_`` — a SyntaxError
    introduced by mechanical renaming (originally six distinct arguments:
    encoded datasets, input length, cap length, and the three special token
    ids). The repeated ``__a = ...`` assignments below also overwrote what
    were originally writes into the numpy arrays, and ``encoded_datasets`` /
    ``tensor_datasets`` / ``n_batch`` / ``input_len`` etc. are unresolved.
    Restore from the original script before use.
    """
    __a : List[str] = []
    for dataset in encoded_datasets:
        __a : List[str] = len(a_)
        # Per-dataset buffers: token ids, mc token positions, LM labels
        # (ignore index -100), and multiple-choice labels.
        __a : List[str] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa)
        __a : Tuple = np.zeros((n_batch, 2) , dtype=np.intaa)
        __a : Tuple = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa)
        __a : Optional[Any] = np.zeros((n_batch,) , dtype=np.intaa)
        for (
            i,
            (story, conta, conta, mc_label),
        ) in enumerate(a_):
            # Each choice is start + story + delimiter + continuation + classify.
            __a : str = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            __a : Tuple = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            __a : Tuple = with_conta
            __a : int = with_conta
            __a : List[str] = len(a_) - 1
            __a : int = len(a_) - 1
            __a : Optional[int] = with_conta
            __a : Tuple = with_conta
            __a : List[Any] = mc_label
            __a : Any = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(a_) for t in all_inputs))
    return tensor_datasets
def __A ( ) -> Union[str, Any]:
__a : List[str] = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=a_ , default='''openai-gpt''' , help='''pretrained model name''')
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''')
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''')
parser.add_argument(
'''--output_dir''' , default=a_ , type=a_ , required=a_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=a_ , default='''''')
parser.add_argument('''--eval_dataset''' , type=a_ , default='''''')
parser.add_argument('''--seed''' , type=a_ , default=42)
parser.add_argument('''--num_train_epochs''' , type=a_ , default=3)
parser.add_argument('''--train_batch_size''' , type=a_ , default=8)
parser.add_argument('''--eval_batch_size''' , type=a_ , default=16)
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=a_ , help='''Epsilon for Adam optimizer.''')
parser.add_argument('''--max_grad_norm''' , type=a_ , default=1)
parser.add_argument(
'''--max_steps''' , default=-1 , type=a_ , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=a_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=a_ , default=6.25e-5)
parser.add_argument('''--warmup_steps''' , default=0 , type=a_ , help='''Linear warmup over warmup_steps.''')
parser.add_argument('''--lr_schedule''' , type=a_ , default='''warmup_linear''')
parser.add_argument('''--weight_decay''' , type=a_ , default=0.0_1)
parser.add_argument('''--lm_coef''' , type=a_ , default=0.9)
parser.add_argument('''--n_valid''' , type=a_ , default=3_74)
parser.add_argument('''--server_ip''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
parser.add_argument('''--server_port''' , type=a_ , default='''''' , help='''Can be used for distant debugging.''')
__a : str = parser.parse_args()
print(a_)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''')
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=a_)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
__a : Tuple = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
__a : str = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(a_ , a_))
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__a : List[str] = ['''_start_''', '''_delimiter_''', '''_classify_''']
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.model_name)
tokenizer.add_tokens(a_)
__a : Union[str, Any] = tokenizer.convert_tokens_to_ids(a_)
__a : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
model.resize_token_embeddings(len(a_))
model.to(a_)
# Load and encode the datasets
def tokenize_and_encode(a_ :List[Any]):
if isinstance(a_ , a_):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(a_))
elif isinstance(a_ , a_):
return obj
return [tokenize_and_encode(a_) for o in obj]
logger.info('''Encoding dataset...''')
__a : Dict = load_rocstories_dataset(args.train_dataset)
__a : int = load_rocstories_dataset(args.eval_dataset)
__a : Optional[int] = (train_dataset, eval_dataset)
__a : List[Any] = tokenize_and_encode(a_)
# Compute the max input length for the Transformer
__a : List[Any] = model.config.n_positions // 2 - 2
__a : int = max(
len(story[:max_length]) + max(len(conta[:max_length]) , len(conta[:max_length])) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset)
__a : Union[str, Any] = min(a_ , model.config.n_positions) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__a : Tuple = pre_process_datasets(a_ , a_ , a_ , *a_)
__a , __a : Tuple = tensor_datasets[0], tensor_datasets[1]
__a : List[str] = TensorDataset(*a_)
__a : Optional[Any] = RandomSampler(a_)
__a : str = DataLoader(a_ , sampler=a_ , batch_size=args.train_batch_size)
__a : List[str] = TensorDataset(*a_)
__a : Optional[int] = SequentialSampler(a_)
__a : Optional[Any] = DataLoader(a_ , sampler=a_ , batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__a : int = args.max_steps
__a : Optional[int] = args.max_steps // (len(a_) // args.gradient_accumulation_steps) + 1
else:
__a : str = len(a_) // args.gradient_accumulation_steps * args.num_train_epochs
__a : List[Any] = list(model.named_parameters())
__a : Optional[int] = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__a : List[str] = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], '''weight_decay''': 0.0},
]
__a : int = AdamW(a_ , lr=args.learning_rate , eps=args.adam_epsilon)
__a : Union[str, Any] = get_linear_schedule_with_warmup(
a_ , num_warmup_steps=args.warmup_steps , num_training_steps=a_)
if args.do_train:
__a , __a , __a : Dict = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs) , desc='''Epoch'''):
__a : Dict = 0
__a : Dict = 0
__a : List[str] = tqdm(a_ , desc='''Training''')
for step, batch in enumerate(a_):
__a : Dict = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : str = batch
__a : List[Any] = model(a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : Optional[Any] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__a : int = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__a : Tuple = '''Training loss: {:.2e} lr: {:.2e}'''.format(a_ , scheduler.get_lr()[0])
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__a : Dict = model.module if hasattr(a_ , '''module''') else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__a : int = os.path.join(args.output_dir , a_)
__a : str = os.path.join(args.output_dir , a_)
torch.save(model_to_save.state_dict() , a_)
model_to_save.config.to_json_file(a_)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
__a : str = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
__a : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
model.to(a_)
if args.do_eval:
model.eval()
__a , __a : List[Any] = 0, 0
__a , __a : Union[str, Any] = 0, 0
for batch in tqdm(a_ , desc='''Evaluating'''):
__a : str = tuple(t.to(a_) for t in batch)
__a , __a , __a , __a : List[Any] = batch
with torch.no_grad():
__a , __a , __a , __a : str = model(
a_ , mc_token_ids=a_ , lm_labels=a_ , mc_labels=a_)
__a : List[str] = mc_logits.detach().cpu().numpy()
__a : Optional[Any] = mc_labels.to('''cpu''').numpy()
__a : str = accuracy(a_ , a_)
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
__a : Tuple = eval_loss / nb_eval_steps
__a : List[str] = eval_accuracy / nb_eval_examples
__a : List[Any] = tr_loss / nb_tr_steps if args.do_train else None
__a : List[str] = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__a : Dict = os.path.join(args.output_dir , '''eval_results.txt''')
with open(a_ , '''w''') as writer:
logger.info('''***** Eval results *****''')
for key in sorted(result.keys()):
logger.info(''' %s = %s''' , a_ , str(result[key]))
writer.write('''%s = %s\n''' % (key, str(result[key])))
# Script entry point: runs the OpenAI-GPT ROCStories fine-tune/eval driver defined above.
if __name__ == "__main__":
    main()
| 52
| 1
|
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    """Mutable state pytree for the Flax DDPM scheduler.

    NOTE(review): restored from an obfuscated original whose `create` method
    reused the parameter name `_lowercase` three times (a SyntaxError) and
    whose class name did not match the in-file call
    `DDPMSchedulerState.create(common=..., init_noise_sigma=..., timesteps=...)`.
    """

    # Shared beta/alpha tables computed once per scheduler configuration.
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        """Build a fresh state; keyword order mirrors the scheduler's create_state()."""
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


# Backward-compatible alias for the obfuscated original class name.
snake_case = DDPMSchedulerState
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    """Output of a DDPM scheduler step; inherits `prev_sample` from FlaxSchedulerOutput.

    NOTE(review): the obfuscated original inherited from the undefined name
    `_A`; the base and class name are restored from the file's imports and the
    constructor call `FlaxDDPMSchedulerOutput(prev_sample=..., state=...)` below.
    """

    # Updated scheduler state to feed into the next step() call.
    state: "DDPMSchedulerState"


# Backward-compatible alias for the obfuscated original class name.
snake_case = FlaxDDPMSchedulerOutput
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax/JAX DDPM ancestral sampling scheduler (Ho et al., 2020).

    NOTE(review): reconstructed from an obfuscated original in which the class
    inherited twice from the undefined name `_A`, every method was named `a__`
    (each definition shadowing the previous one) and signatures reused the
    parameter name `_lowercase` (a SyntaxError). Method/parameter names are
    restored from the surviving call sites (`self._get_variance`,
    `DDPMSchedulerState.create`, `FlaxDDPMSchedulerOutput`, `state.replace`)
    and the defaults visible in the original `__init__`.
    """

    # Schedulers this configuration is compatible with.
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    # Computation dtype for all state tensors.
    dtype: jnp.dtype

    @property
    def has_state(self) -> bool:
        # Flax schedulers keep all mutable values in an explicit state object.
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        # Everything else is stored on self.config by @register_to_config.
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        """Build the initial scheduler state (beta tables + full training timestep ladder)."""
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(self, state, sample, timestep=None) -> jnp.ndarray:
        """DDPM needs no input scaling; returns the sample unchanged."""
        return sample

    def set_timesteps(self, state, num_inference_steps, shape=()) -> DDPMSchedulerState:
        """Return a state with `num_inference_steps` evenly spaced (descending) timesteps."""
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state, t, predicted_variance=None, variance_type=None):
        """Posterior variance beta-tilde_t per the config's variance_type."""
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # interpolate between min and max variance using the model prediction
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(self, state, model_output, timestep, sample, key=None, return_dict=True):
        """One reverse-diffusion step x_t -> x_{t-1}; returns FlaxDDPMSchedulerOutput (or a tuple)."""
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                ' for the FlaxDDPMScheduler.'
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        # No noise is added on the final step (t == 0).
        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(self, state, original_samples, noise, timesteps) -> jnp.ndarray:
        """Forward-diffuse clean samples to the given timesteps."""
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state, sample, noise, timesteps) -> jnp.ndarray:
        """Velocity target for v-prediction training."""
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self) -> int:
        return self.config.num_train_timesteps


# Backward-compatible alias for the obfuscated original class name.
snake_case = FlaxDDPMScheduler
| 717
|
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
# Module-level logger from the HF logging utility. NOTE(review): the `: int`
# annotation looks wrong for a logger object, and nothing in the visible chunk
# reads this name — likely obfuscation residue; confirm against the full file.
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class snake_case:
    """Composite RAG tokenizer wrapping a question-encoder tokenizer and a generator tokenizer.

    NOTE(review): reconstructed from an obfuscated original in which every
    method was named `a__` (each definition shadowing the previous one) and
    `__init__` reused the parameter name `_lowercase` twice (a SyntaxError).
    Names are restored from the attribute reads, f-strings and sub-folder
    string constants still present in the method bodies.
    """

    def __init__(self, question_encoder, generator) -> None:
        self.question_encoder = question_encoder
        self.generator = generator
        # __call__ delegates to this; the _switch_to_*_mode helpers flip it.
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory) -> None:
        """Save both sub-tokenizers into dedicated sub-folders of `save_directory`."""
        if os.path.isfile(save_directory):
            raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, 'question_encoder_tokenizer')
        generator_path = os.path.join(save_directory, 'generator_tokenizer')
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load question-encoder and generator tokenizers from their sub-folders."""
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop('config', None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder='question_encoder_tokenizer'
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder='generator_tokenizer'
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        # Tokenize with whichever sub-tokenizer is currently active.
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self) -> None:
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self) -> None:
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding="longest",
        return_tensors=None,
        truncation=True,
        **kwargs,
    ) -> BatchEncoding:
        """Deprecated helper: tokenize sources (and optionally targets) into one BatchEncoding."""
        warnings.warn(
            '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
            'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
            'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
            'details',
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs['labels'] = labels['input_ids']
        return model_inputs
| 238
| 0
|
"""simple docstring"""
def __snake_case(number: int) -> bool:
    """Return True if `number` is even (bitwise test; also correct for negatives).

    Bug fix: the obfuscated original named its parameter `_lowercase` while the
    body read `number`, so every call raised NameError.

    >>> __snake_case(4)
    True
    >>> __snake_case(7)
    False
    >>> __snake_case(0)
    True
    """
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 34
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    """State pytree for the Flax Karras-VE scheduler.

    All fields start as None and are filled in by the scheduler's
    `set_timesteps` (see the `state.replace(num_inference_steps=...,
    schedule=..., timesteps=...)` call below, which grounds the field names).
    """

    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        """Return an empty state (called as `KarrasVeSchedulerState.create()` below)."""
        return cls()


# Backward-compatible alias for the obfuscated original class name.
_lowerCamelCase = KarrasVeSchedulerState
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output of a Karras-VE scheduler step.

    Field names mirror the keyword arguments used at the construction sites in
    the scheduler below (`prev_sample=..., derivative=..., state=...`); the
    base class is the `BaseOutput` imported at the top of this module.
    """

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: "KarrasVeSchedulerState"


# Backward-compatible alias for the obfuscated original class name.
_lowerCamelCase = FlaxKarrasVeOutput
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax Karras et al. (2022) variance-exploding stochastic sampler.

    NOTE(review): reconstructed from an obfuscated original that inherited
    twice from the undefined name `UpperCamelCase` (a TypeError), named every
    method `_snake_case` (each definition shadowing the previous one) and
    reused `_SCREAMING_SNAKE_CASE` for all parameters (a SyntaxError). Names
    are restored from the config attributes read in the bodies
    (`sigma_min/sigma_max/s_churn/s_min/s_max/s_noise`) and the constructor
    calls to `KarrasVeSchedulerState` / `FlaxKarrasVeOutput`.
    """

    @property
    def has_state(self) -> bool:
        # State is carried explicitly so the scheduler works under jit.
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # All arguments are stored on self.config by @register_to_config.
        pass

    def create_state(self):
        """Return an empty scheduler state."""
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()) -> KarrasVeSchedulerState:
        """Fill the state with descending timesteps and the geometric sigma schedule."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key):
        """Explode the sample to a higher noise level sigma_hat (eqs. 5-6 of Karras et al.)."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        """Euler step from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True
    ):
        """Second-order (Heun) correction using the derivative at sigma_prev."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        # Forward diffusion is not defined for this VE scheduler.
        raise NotImplementedError()


# Backward-compatible alias for the obfuscated original class name.
_lowerCamelCase = FlaxKarrasVeScheduler
| 590
| 0
|
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase__(CLIPTokenizer):
    """CLIP tokenizer supporting multi-vector textual-inversion placeholder tokens.

    NOTE(review): reconstructed from an obfuscated original whose base class
    was the undefined name `lowercase__` (restored to the `CLIPTokenizer`
    imported above) and whose signatures reused one parameter name for both
    `*args` and `**kwargs` (a SyntaxError). Method names are grounded by the
    internal calls `self.try_adding_tokens` and
    `self.replace_placeholder_tokens_in_text`.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # placeholder token -> list of the sub-tokens it expands to
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        """Add one token to the vocabulary, failing loudly if it already exists."""
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        """Register a placeholder that expands to `num_vec_per_token` vocabulary tokens."""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f'''_{i}'''
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f'''The tokenizer already has placeholder token {token} that can get confused with'''
                    f''' {placeholder_token}keep placeholder tokens independent'''
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        """Expand every registered placeholder in `text` (or each text in a list) to its sub-tokens."""
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                # optionally load only a leading fraction of the sub-tokens
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
| 715
|
'''simple docstring'''
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort array[start:end] in place with insertion sort and return the array.

    `end == 0` means "through the end of the list" (matches the call in
    intro_sort). Restored name: the obfuscated original was `_lowerCamelCase`
    with three identically-named parameters (a SyntaxError), while intro_sort
    calls `insertion_sort`.
    """
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # shift larger elements right until the insertion point is found
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sift array[index] down so the subtree rooted at `index` is a max heap.

    Restored name: the obfuscated original was `_lowerCamelCase` (shadowed by
    later definitions), while heap_sort and this function's own recursion call
    `heapify`.
    """
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    """In-place heapsort; returns `array` sorted ascending.

    Restored name: intro_sort calls `heap_sort` when the depth limit hits 0.
    """
    n = len(array)
    # build a max heap bottom-up
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    # repeatedly move the current max to the end and re-heapify the prefix
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array
def median_of_a(array: list, first_index: int, middle_index: int, last_index: int):
    """Return the median of array[first_index], array[middle_index], array[last_index].

    Used by intro_sort to pick a pivot; the name matches its call site
    (`median_of_a(...)`), replacing the shadowed obfuscated name.
    """
    # XOR-style trick: the median is the element that is greater than exactly
    # one of the other two.
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, pivot, low: int, high: int) -> int:
    """Hoare-style partition of array[low:high] around `pivot`; returns the split index.

    Elements < pivot end up left of the returned index, elements > pivot right
    of it. Name restored to match the `partition(...)` call in intro_sort;
    internal names `low`/`high` come from the original assignments.
    """
    i = low
    j = high
    while True:
        # advance i past elements already on the correct (left) side
        while array[i] < pivot:
            i += 1
        j -= 1
        # retreat j past elements already on the correct (right) side
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    """Introsort entry point: sorts `array` in place and returns it.

    Depth limit is the classic 2*ceil(log2(n)); the obfuscated original called
    the nonexistent `math.loga` (digits were mangled to letters) — restored to
    `math.log2`. Below `size_threshold` elements, intro_sort falls back to
    insertion sort.
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """Recursive introsort core over array[start:end].

    Quicksorts with a median-of-three pivot, falls back to heap_sort when the
    recursion depth budget is exhausted, and finishes small ranges with
    insertion_sort. Callee names restored to match the sibling definitions.
    """
    while end - start > size_threshold:
        if max_depth == 0:
            # depth limit hit: avoid quicksort's quadratic worst case
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_a(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, pivot, start, end)
        # recurse on the right half, iterate on the left half
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): both results below are bound to `lowerCAmelCase_` (the
    # second assignment shadows the first), yet the expressions read
    # `user_input` and `unsorted`, which are never defined — as written this
    # driver raises NameError; the obfuscation destroyed the variable names.
    lowerCAmelCase_ : Optional[int] = input("Enter numbers separated by a comma : ").strip()
    lowerCAmelCase_ : List[str] = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
| 289
| 0
|
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
# Metric description shown by `datasets`. The three constants below were all
# named `_SCREAMING_SNAKE_CASE` in the obfuscated original (each assignment
# shadowing the last), while the decorator on the metric class reads
# `_DESCRIPTION` and `_KWARGS_DESCRIPTION` — names restored accordingly.
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''

# Argument/return documentation injected into the metric's docstring.
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''

# BibTeX citation for the underlying scikit-learn implementation.
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a(datasets.Metric):
    """F1 metric backed by scikit-learn's f1 score.

    NOTE(review): the obfuscated original named both methods `UpperCamelCase`
    (so the first shadowed the second) and duplicated the parameter name
    `__SCREAMING_SNAKE_CASE` in the compute signature (a SyntaxError). The
    datasets.Metric API dispatches to `_info` and `_compute`, so those names
    are restored. The top-of-file import `from sklearn.metrics import fa_score`
    also looks garbled (presumably `f1_score`) — confirm against the full file.
    """

    def _info(self) -> datasets.MetricInfo:
        """Describe inputs/outputs; multilabel configs take sequences of ints."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('int32')),
                    'references': datasets.Sequence(datasets.Value('int32')),
                }
                if self.config_name == 'multilabel'
                else {
                    'predictions': datasets.Value('int32'),
                    'references': datasets.Value('int32'),
                }
            ),
            reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """Return {"f1": score}; a float for averaged scores, an array when average=None."""
        score = fa_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 549
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    """Builds tiny RegNet configs and random inputs for the unit tests.

    Named ``RegNetModelTester`` because the test class below instantiates
    it by that name; method names follow the transformers tester protocol
    (``prepare_config_and_inputs`` etc.) which the tests call explicitly.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # one stage per entry in hidden_sizes; read by test_hidden_states_output
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None unless use_labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Unit tests for RegNet.

    Attention and inputs_embeds tests are skipped: RegNet is a pure
    convnet with no attention and no input embeddings.
    """

    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # no extra common-property checks beyond ConfigTester's
        return

    @unittest.skip(reason='RegNet does not use inputs_embeds' )
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='RegNet does not support input and output embeddings' )
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                # normalization layers must start as identity (weight=1, bias=0);
                # the original checked the nonexistent `nn.BatchNormad`
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=F'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=F'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            # +1 for the stem output
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test.

    Named ``prepare_img`` because the integration test below calls it by
    that name.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against a pretrained RegNet checkpoint."""

    @cached_property
    def default_image_processor(self):
        # read as `self.default_image_processor` in the test below, so the
        # property must carry exactly this name
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4_180, -1.5_051, -3.4_836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 549
| 1
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
# module-level logger; every function below logs through this name
logger = logging.get_logger(__name__)

# fairseq parameter name -> HF wav2vec2 parameter name; "*" is a
# layer-index wildcard filled in while iterating the state dict
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}
# HF parameters that live at the top level of the model (not under wav2vec2.)
TOP_LEVEL_KEYS = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the attribute of ``hf_pointer`` addressed by the
    dotted ``key``, optionally into its ``weight``/``bias``/``weight_g``/
    ``weight_v`` tensor selected by ``weight_type``.

    ``full_name`` is the original fairseq parameter name, used only for
    error/log messages. Raises AssertionError on a shape mismatch.
    """
    # walk the dotted path down to the target module/parameter
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        # weight-norm magnitude component
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        # weight-norm direction component
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    """Load every encoder weight from ``fairseq_model`` into ``hf_model``.

    Returns the encoder->decoder projection module (``fairseq_model.proj``)
    when present, else None; unmatched weights are logged as a warning.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == '''group''',
            )
            is_used = True
        elif name.split('.')[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # recover the layer index from the fairseq name
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""" )
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one feature-extractor conv/layer-norm weight into ``feature_extractor``.

    The fairseq name encodes ``conv_layers.<layer_id>.<type_id>...``:
    type 0 is the conv itself, type 2 the layer norm (only layer 0 when
    group norm is used). Anything else is recorded in ``unused_weights``.
    """
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` that shares its weight tensor with
    the given embedding, so it can serve as a tied output projection."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # share (not copy) the embedding weights
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """Build a token->id vocabulary from a fairseq dict file.

    Each line of ``dict_path`` is ``<token> <count>``; the four special
    tokens take ids 0-3 and the file's tokens follow in order from id 4.
    """
    with open(dict_path, '''r''', encoding='''utf-8''') as f:
        lines = f.readlines()
        words = [line.split(''' ''')[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        '''<s>''': 0,
        '''<pad>''': 1,
        '''</s>''': 2,
        '''<unk>''': 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """Convert a fairseq wav2vec2+speech2text2 checkpoint to a HF
    ``SpeechEncoderDecoderModel`` and save model, tokenizer and feature
    extractor into ``pytorch_dump_folder_path``.
    """
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path)
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=1_6_0_0_0,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config)
    projection_layer = recursively_load_weights_wavaveca(model.encoder, hf_encoder)

    hf_decoder = SpeechaTextaForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove('''embed_out''')
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )

    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    # embeddings are copied, not tied, between encoder and decoder
    hf_wavavec.config.tie_word_embeddings = False

    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, '''vocab.json'''), '''w''') as fp:
        json.dump(vocab_dict, fp)

    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path, '''vocab.json'''))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wavavec.config.to_dict()
    config['''pad_token_id'''] = tokenizer.pad_token_id
    config['''bos_token_id'''] = tokenizer.bos_token_id
    config['''eos_token_id'''] = tokenizer.eos_token_id
    config['''tokenizer_class'''] = '''speech_to_text_2'''
    config['''feature_extractor_type'''] = '''wav2vec2'''

    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument(
        '--encoder_config_path',
        default='facebook/wav2vec2-large-lv60',
        type=str,
        help='Path to hf encoder wav2vec2 checkpoint config',
    )
    parser.add_argument(
        '--decoder_config_path',
        default='facebook/s2t-small-mustc-en-fr-st',
        type=str,
        help='Path to hf decoder s2t checkpoint config',
    )
    parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder')
    parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
| 250
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
# module-level logger used by ConversationalPipeline below
logger = logging.get_logger(__name__)
class Conversation:
    """Holds the state of one conversation: past user inputs, generated
    responses, and the not-yet-processed new user input.

    Named ``Conversation`` because the pipeline below type-checks its
    inputs against that name.
    """

    def __init__(
        self,
        text: Optional[str] = None,
        conversation_id: Optional[uuid.UUID] = None,
        past_user_inputs: Optional[List[str]] = None,
        generated_responses: Optional[List[str]] = None,
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        # equal when same id, or when the full visible state matches
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        """Queue ``text`` as the new user input; refuse (or overwrite) if
        an unprocessed input already exists."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    f"""with: \"{text}\".""" )
                self.new_user_input = text
            else:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
        else:
            self.new_user_input = text

    def mark_processed(self):
        """Move the pending user input (if any) into the history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield ``(is_user, text)`` pairs in chronological order,
        including the pending user input last."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            name = '''user''' if is_user else '''bot'''
            output += f"""{name} >> {text} \n"""
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS , r'''
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    ''' , )
class ConversationalPipeline(Pipeline):
    """Multi-turn conversation pipeline built on top of ``Pipeline``.

    The ``preprocess`` / ``_forward`` / ``postprocess`` hooks carry the
    names the base ``Pipeline`` dispatches on.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            # generation needs a pad token; fall back to EOS
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        """Split call-time kwargs into the three pipeline stages."""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs['''max_length''']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        # unwrap single-conversation batches
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError('''ConversationalPipeline, expects Conversation as inputs''')
        if conversation.new_user_input is None:
            raise ValueError(
                f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
                '''Add user inputs with the conversation\'s `add_user_input` method''' )
        if hasattr(self.tokenizer, '''_build_conversation_input_ids'''):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get('''max_length''', self.model.config.max_length)

        n = model_inputs['''input_ids'''].shape[1]
        if max_length - minimum_tokens < n:
            # keep at least `minimum_tokens` of budget for the response
            logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
            trim = max_length - minimum_tokens
            model_inputs['''input_ids'''] = model_inputs['''input_ids'''][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs['''attention_mask'''] = model_inputs['''attention_mask'''][:, -trim:]
        conversation = model_inputs.pop('''conversation''')
        generate_kwargs['''max_length'''] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            # encoder-decoder outputs start with the decoder start token
            start_position = 1
        else:
            # decoder-only outputs echo the prompt; strip it
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs['''output_ids''']
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs['''conversation''']
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation):
        """Flatten a conversation into ids with EOS between turns, keeping
        only the most recent ``model_max_length`` tokens."""
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 250
| 1
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    """Build the `accelerate config` argument parser with its
    default/update subcommands attached; returns the parser."""
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser
def main():
    """Entry point: parse CLI args and dispatch to the chosen subcommand."""
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    # no subcommand selected -> show help and exit non-zero
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 44
|
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """2D max-pooling over a square matrix.

    :param arr: square 2D input matrix
    :param size: pooling window size
    :param stride: step between consecutive windows
    :return: pooled matrix of shape ``((n - size) // stride + 1,) * 2``
    :raises ValueError: if the input is not square
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    i = mat_i = 0
    while i + size <= arr.shape[0]:
        j = mat_j = 0
        while j + size <= arr.shape[1]:
            # maximum of the current pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """2D average-pooling (truncated to int) over a square matrix.

    :param arr: square 2D input matrix
    :param size: pooling window size
    :param stride: step between consecutive windows
    :return: pooled matrix of shape ``((n - size) // stride + 1,) * 2``
    :raises ValueError: if the input is not square
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    i = mat_i = 0
    while i + size <= arr.shape[0]:
        j = mat_j = 0
        while j + size <= arr.shape[1]:
            # truncated average of the current pooling window
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1

    return updated_arr
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="""avgpooling""", verbose=True)

    # Loading the image
    image = Image.open("""path_to_image""")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 145
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU checks for ``KandinskyImgaImgPipeline`` built from tiny dummy models."""

    pipeline_class = KandinskyImgaImgPipeline
    # Call arguments exercised by PipelineTesterMixin.
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 1_00

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=10_05,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            '''in_channels''': 4,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''text_image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''text_image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Build the full set of tiny components needed to instantiate the pipeline."""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            '''num_train_timesteps''': 10_00,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00_085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        scheduler = DDIMScheduler(**ddim_config)
        components = {
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs (embeddings, init image, generator) for `device`."""
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert('''RGB''').resize((2_56, 2_56))
        if str(device).startswith('''mps'''):
            # MPS has no per-device Generator; seed the global RNG instead.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            '''prompt''': '''horse''',
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        # return_dict=False returns a plain tuple; both paths must agree.
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class KandinskyImgaImgPipelineIntegrationTests(unittest.TestCase):
    """End-to-end GPU check of the Kandinsky 2.1 img2img pipeline against a reference image."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinsky/kandinsky_img2img_frog.npy''')
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
        prompt = '''A red cartoon frog, 4k'''
        # NOTE(review): dtype restored as float16 (source had nonexistent `torch.floataa`) — confirm.
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1''', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        image_embeds, zero_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='''''',).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_embeds, negative_image_embeds=zero_image_embeds, generator=generator, num_inference_steps=1_00, height=7_68, width=7_68, strength=0.2, output_type='''np''',)
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image, expected_image)
| 121
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Install `weight` (and optionally `bias`) on `torch_layer` as nn.Parameters.

    Shapes must match the layer's existing parameters exactly; the asserts give
    a readable error naming the mismatching layer.
    """
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Load trax LSH self-attention weights (query_key, value, output dense) into `torch_layer`."""
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Load trax local self-attention weights (query, key, value, output dense) into `torch_layer`."""
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Load one trax Reformer block (attention + feed-forward) into `torch_block`."""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )
    # lsh weights + output
    attn_weights = weights[0][1]
    # LSH attention packs 3 weight groups, local attention packs 4.
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Load a full trax Reformer weight tree into `torch_model` (a ReformerModelWithLMHead)."""
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings, torch.tensor(word_embeddings), )
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    # Each torch layer consumes 4 consecutive trax weight groups.
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm, torch.tensor(layer_norm_out_weight), torch.tensor(layer_norm_out_bias), )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder, torch.tensor(output_embed_weights).transpose(0, 1).contiguous(), torch.tensor(output_embed_bias), )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a ReformerModelWithLMHead from `config_file`, load pickled trax weights, save a state dict."""
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, '''rb''') as f:
        model_weights = pickle.load(f)['''weights''']
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: convert a pickled trax Reformer checkpoint to a PyTorch state dict.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 121
| 1
|
from __future__ import annotations
def max_sum_in_array(array: list, k: int) -> int:
    """Return the maximum sum of any `k` consecutive elements of `array`.

    Uses a sliding window: O(n) time, O(1) extra space.

    Raises:
        ValueError: if `array` has fewer than `k` elements or `k` is negative.
    """
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # Slide the window one step right: drop array[i], add array[i + k].
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
    # Demo: run doctests, then print the best k-window sum of a random array.
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-10_00, 10_00) for i in range(1_00)]
    k = randint(0, 1_10)
    print(f"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 625
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import table: module name -> public names it provides.
_import_structure = {
    'configuration_owlvit': [
        'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'OwlViTConfig',
        'OwlViTOnnxConfig',
        'OwlViTTextConfig',
        'OwlViTVisionConfig',
    ],
    'processing_owlvit': ['OwlViTProcessor'],
}
# Vision-only objects are registered only when the vision backend is available.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
    _import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']
# Torch-backed modeling objects.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_owlvit'] = [
        'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'OwlViTModel',
        'OwlViTPreTrainedModel',
        'OwlViTTextModel',
        'OwlViTVisionModel',
        'OwlViTForObjectDetection',
    ]
if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 625
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix: str = "") -> str:
    """Return a unique path (inside a fresh temp directory) ending in `suffix`."""
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    """Round-trip checks for AgentAudio (tensor <-> wav file on disk)."""

    def test_from_tensor(self):
        # NOTE(review): dtype restored as float64 (source had nonexistent `torch.floataa`) — confirm.
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1E-4))
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1E-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16_000)
        agent_type = AgentAudio(path)
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1E-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    """Round-trip checks for AgentImage (tensor / path / PIL image inputs)."""

    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1E-4))
        self.assertIsInstance(agent_type.to_raw(), Image.Image)
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)
        # Built from a path: to_string() must point at the very same file.
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)
        # Built from a PIL image: serialization writes a new file, not the original.
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    """AgentText behaves like the plain string it wraps."""

    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)
        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 714
|
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected", [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ], )
def test_distribute_shards(kwargs, expected):
    """_distribute_shards splits num_shards into at most max_num_jobs contiguous ranges."""
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected", [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ], )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """_split_gen_kwargs shards list-valued kwargs across at most max_num_jobs dicts."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    "gen_kwargs, expected", [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ], )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """Mismatched shard-list lengths must raise; otherwise the shard count is returned."""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 518
| 0
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    """Builds a tiny Swinv2 config plus dummy inputs/labels for the model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.0_2,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Sequence shrinks by 4x per stage; hidden dim doubles per stage.
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCAmelCase = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def __a ( self : str ):
A = SwinvaModelTester(self )
A = ConfigTester(self , config_class=lowercase__ , embed_dim=37 )
def __a ( self : Union[str, Any] ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self : Union[str, Any] ):
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def __a ( self : Any ):
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def __a ( self : Any ):
pass
def __a ( self : Optional[Any] ):
A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(lowercase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear ) )
def __a ( self : int ):
A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(lowercase__ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase__ )
def __a ( self : str ):
A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
for model_class in self.all_model_classes:
A = True
A = False
A = True
A = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
A = outputs.attentions
A = len(self.model_tester.depths )
self.assertEqual(len(lowercase__ ) , lowercase__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A = True
A = config.window_size**2
A = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
A = outputs.attentions
self.assertEqual(len(lowercase__ ) , lowercase__ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
A = len(lowercase__ )
# Check attention is always last and order is fine
A = True
A = True
A = model_class(lowercase__ )
model.to(lowercase__ )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
A = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
A = 2
self.assertEqual(out_len + added_hidden_states , len(lowercase__ ) )
A = outputs.attentions
self.assertEqual(len(lowercase__ ) , lowercase__ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
    """Helper: run a forward pass and validate hidden/reshaped hidden state shapes.

    Named `check_hidden_states_output` because sibling tests call it under that
    name; the obfuscated source defined it as `__a`, breaking those calls.
    """
    model = model_class(config)
    model.to(torch_device)
    model.eval()
    with torch.no_grad():
        outputs = model(**self._prepare_for_class(inputs_dict, model_class))
    hidden_states = outputs.hidden_states
    expected_num_layers = getattr(
        self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
    )
    self.assertEqual(len(hidden_states), expected_num_layers)
    # Swinv2 has a different seq_length
    patch_size = (
        config.patch_size
        if isinstance(config.patch_size, collections.abc.Iterable)
        else (config.patch_size, config.patch_size)
    )
    num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
    self.assertListEqual(
        list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim]
    )
    reshaped_hidden_states = outputs.reshaped_hidden_states
    self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
    # flatten (B, C, H, W) back to (B, H*W, C) and compare against hidden_states layout
    batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
    reshaped_hidden_states = (
        reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
    )
    self.assertListEqual(
        list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim]
    )
def test_hidden_states_output(self):
    """Hidden states must be emitted both via kwargs and via config flags."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    image_size = (
        self.model_tester.image_size
        if isinstance(self.model_tester.image_size, collections.abc.Iterable)
        else (self.model_tester.image_size, self.model_tester.image_size)
    )
    for model_class in self.all_model_classes:
        inputs_dict["output_hidden_states"] = True
        self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
        # check that output_hidden_states also work using config
        del inputs_dict["output_hidden_states"]
        config.output_hidden_states = True
        self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
def test_hidden_states_output_with_padding(self):
    """Same as test_hidden_states_output, but with an image size that forces padding."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    # patch size 3 does not divide the test image size, so the model must pad
    config.patch_size = 3
    image_size = (
        self.model_tester.image_size
        if isinstance(self.model_tester.image_size, collections.abc.Iterable)
        else (self.model_tester.image_size, self.model_tester.image_size)
    )
    patch_size = (
        config.patch_size
        if isinstance(config.patch_size, collections.abc.Iterable)
        else (config.patch_size, config.patch_size)
    )
    padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
    padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
    for model_class in self.all_model_classes:
        inputs_dict["output_hidden_states"] = True
        self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
        # check that output_hidden_states also work using config
        del inputs_dict["output_hidden_states"]
        config.output_hidden_states = True
        self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
def test_for_masked_image_modeling(self):
    """Exercise the masked-image-modeling head via the model tester."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
def test_for_image_classification(self):
    """Exercise the image-classification head via the model tester."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
    """Smoke-test loading the first published Swinv2 checkpoint from the Hub."""
    for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        model = SwinvaModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
def test_initialization(self):
    """With zero-init config, every trainable non-embedding parameter must mean to 0 or 1."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    configs_no_init = _config_zero_init(config)
    for model_class in self.all_model_classes:
        model = model_class(config=configs_no_init)
        for name, param in model.named_parameters():
            if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                self.assertIn(
                    # round to 9 decimals to tolerate float noise
                    ((param.data.mean() * 1e9).round() / 1e9).item(),
                    [0.0, 1.0],
                    msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                )
@require_vision
@require_torch
class lowerCamelCase__(unittest.TestCase):
    """Slow integration test: run a real Swinv2 checkpoint on a COCO fixture image."""

    @cached_property
    def default_image_processor(self):
        # Named `default_image_processor` because the test below reads it under
        # that name; returns None when vision deps are unavailable.
        return (
            AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256').to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 690
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy import structure: maps each submodule name to the public names it exports.
# The obfuscated original bound this dict and the modeling list to throwaway
# names while _LazyModule read the undefined `_import_structure` (NameError).
_import_structure = {
    'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is present: expose the modeling symbols as well
    _import_structure['modeling_graphormer'] = [
        'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GraphormerForGraphClassification',
        'GraphormerModel',
        'GraphormerPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 442
| 0
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy import structure: submodule name -> public names exported from it.
# Fixes two obfuscation defects: the structure dict was bound to throwaway names
# while _LazyModule read the undefined `_import_structure`, and the TYPE_CHECKING
# imports used garbled `pixastruct` module/class names that contradict the
# export strings below.
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    # Install the lazy proxy so the heavy submodules load only on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 208
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class __magic_name__(unittest.TestCase):
    """Numerical parity tests: Flax UNet2DConditionModel outputs vs. reference
    torch slices, using Gaussian-noise fixtures downloaded from the Hub.

    Reconstructed from an obfuscated source whose duplicate parameter names
    (`_a, _a`) were SyntaxErrors; the parameter names below match the names the
    method bodies actually reference (`seed`, `shape`, `fp16`, ...).
    """

    def get_file_format(self, seed, shape):
        # File naming scheme of the reference .npy fixtures on the Hub.
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNetaDConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 208
| 1
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase__(unittest.TestCase):
    """Fast test for ScoreSdeVePipeline with a tiny randomly-initialized UNet."""

    @property
    def dummy_uncond_unet(self):
        # Named `dummy_uncond_unet` because the test below reads it under that
        # name (the obfuscated source shadowed it behind a garbled identifier).
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type='numpy', generator=generator).images
        # same seed, but request the tuple return instead of the dataclass
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type='numpy', generator=generator, return_dict=False
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    """Slow integration test against the pretrained google/ncsnpp-church-256 checkpoint.

    Renamed from the obfuscated source, which gave this class the same name as
    the fast-test class above it and thereby silently replaced it.
    """

    def test_sde_ve_pipeline(self):
        model_id = 'google/ncsnpp-church-256'
        model = UNetaDModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type='numpy', generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 521
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    """Return True if every tensor in `tensor_list` has the same shape.

    An empty list is vacuously True. Renamed to match its only call site
    (`assert check_same_shape(...)` in the Karras-schedulers test); the
    obfuscated source also named the parameter inconsistently with the body.
    """
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class UpperCamelCase__(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast tests for StableDiffusionLatentUpscalePipeline with tiny components.

    Reconstructed from an obfuscated source: the base classes referenced an
    undefined name, every class attribute was bound to the same identifier, and
    `get_dummy_inputs` had duplicate parameter names (a SyntaxError).
    """

    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    # NOTE(review): attribute name reconstructed from the mixin convention — confirm
    test_cpu_offload = True

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        torch.manual_seed(0)
        # NOTE(review): norm_num_groups / mid_block_type / only_cross_attention values
        # were garbled in the source; None/None/False reconstructed — confirm.
        model = UNetaDConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_latent_upscaler(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    # The overrides below must keep the mixin test names so they actually apply.
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)
        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class UpperCamelCase__(unittest.TestCase):
    """Slow GPU tests: upscale real latents / images with stabilityai/sd-x2-latent-upscaler."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4', torch_dtype=torch.floataa)
        pipe.to('cuda')
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.floataa)
        upscaler.to('cuda')
        prompt = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
        # feed the base pipeline's latents directly into the upscaler
        low_res_latents = pipe(prompt, generator=generator, output_type='latent').images
        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type='np',
        ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy')
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler', torch_dtype=torch.floataa)
        upscaler.to('cuda')
        prompt = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png')
        upscaled_image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type='np',
        ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy')
        assert np.abs((expected_image - upscaled_image).max()) < 5e-2
| 521
| 1
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path, map_location="cpu", save_path=None):
    """Convert a saved state dict to fp16 in place (or to `save_path` if given).

    Renamed to `convert` to match the `fire.Fire(convert)` entry point; the
    obfuscated source had duplicate parameter names (a SyntaxError), type-checked
    the wrong variable, and discarded the halved tensors into a dead local.

    Raises:
        TypeError: if any value in the loaded dict is not a torch.Tensor.
    """
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''')
        # write the fp16 tensor back into the dict (rebinding existing keys
        # while iterating .items() is safe — no keys are added or removed)
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
| 121
|
"""simple docstring"""
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    """Builds tiny Bit configs/inputs and runs shape checks for the test suite.

    Renamed to `BitModelTester` to match its instantiation in the test class'
    setUp; the obfuscated source's duplicate `__UpperCAmelCase` parameters were
    SyntaxErrors, so the names below are restored from the attribute assignments.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # one stage per entry in `depths`; read back by the hidden-states test
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Bit downsamples by a total factor of 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
# NOTE(review): every class attribute below is bound to the SAME name
# `UpperCAmelCase` — each assignment overrides the previous one, so only the
# final `False` survives. In the upstream file these were distinct attributes
# (model-class tuple, pipeline mapping, and several test_* / fx flags) —
# restore the real names before relying on this class.
# Tuple of model classes under test (empty when torch is unavailable).
UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
# Pipeline-task -> model-class mapping used by the pipeline test mixin.
UpperCAmelCase = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
# Five boolean feature flags, all disabled (original names lost to obfuscation).
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def setUp(self):
    """Create the shared model tester and config tester for the Bit suite."""
    self.model_tester = BitModelTester(self)
    # Bit is vision-only, hence has_text_modality=False; the obfuscated source
    # passed an undefined name here.
    self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
def test_config(self):
    """Run the standard battery of config round-trip and init checks."""
    self.create_and_test_config_common_properties()
    self.config_tester.create_and_test_config_to_json_string()
    self.config_tester.create_and_test_config_to_json_file()
    self.config_tester.create_and_test_config_from_and_save_pretrained()
    self.config_tester.create_and_test_config_with_num_labels()
    self.config_tester.check_config_can_be_init_without_params()
    self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties(self):
    """Intentionally a no-op: Bit has no common config properties to check.

    Named to match the call in test_config; the obfuscated source's collapsed
    method names made the call unresolvable.
    """
    return
@unittest.skip(reason='''Bit does not output attentions''' )
def test_attention_outputs(self):
    """Skipped: Bit is convolutional and produces no attention maps."""
    pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def test_inputs_embeds(self):
    """Skipped: Bit consumes pixel_values only, never embedding inputs."""
    pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def test_model_common_attributes(self):
    """Skipped: Bit exposes no input/output embedding layers."""
    pass
def test_forward_signature(self):
    """Check that every Bit model class' forward() takes `pixel_values` first."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        model = model_class(config)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ['''pixel_values''']
        self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
    """Run the base-model shape check via the model tester."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs)
def test_backbone(self):
    """Run the backbone feature-map/channel checks via the model tester."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_backbone(*config_and_inputs)
def test_initialization(self):
    """Norm layers must initialize with weight==1 and bias==0 in every model class."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        model = model_class(config=config)
        for name, module in model.named_modules():
            # nn.BatchNormad in the obfuscated source is nn.BatchNorm2d
            if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                self.assertTrue(
                    torch.all(module.weight == 1),
                    msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                )
                self.assertTrue(
                    torch.all(module.bias == 0),
                    msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                )
def test_hidden_states_output(self):
    """Hidden states: one per stage (+ embedding), with 1/4-resolution first map."""

    def check_hidden_states_output(inputs_dict, config, model_class):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
        expected_num_stages = self.model_tester.num_stages
        self.assertEqual(len(hidden_states), expected_num_stages + 1)
        # Bit's feature maps are of shape (batch_size, num_channels, height, width)
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
        )

    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    layers_type = ['''preactivation''', '''bottleneck''']
    for model_class in self.all_model_classes:
        for layer_type in layers_type:
            config.layer_type = layer_type
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def lowercase_ ( self :Optional[int] ) -> Optional[int]:
    """Skipped: feed-forward chunking does not apply to Bit."""
    pass
def lowercase_ ( self :Union[str, Any] ) -> Optional[Any]:
    """Run the model tester's shared image-classification check."""
    lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@slow
def lowercase_ ( self :Union[str, Any] ) -> Optional[Any]:
    """Smoke-test loading the first pretrained Bit checkpoint from the hub."""
    for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        lowerCamelCase__ : Optional[int] = BitModel.from_pretrained(__UpperCAmelCase )
        self.assertIsNotNone(__UpperCAmelCase )
def __a ( ):
    """Load and return the COCO-cats test fixture image.

    Fixes the original, which assigned the opened image to a throwaway
    obfuscated local and then returned the undefined name ``image``.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Integration test: run a pretrained Bit classifier on a fixture image."""
    @cached_property
    def lowercase_ ( self :Tuple ) -> Dict:
        """Return the default Bit image processor, or None if vision deps are absent."""
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
        )
    @slow
    def lowercase_ ( self :List[Any] ) -> int:
        """Verify the logits shape and a slice of values for the reference image."""
        lowerCamelCase__ : Optional[int] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__UpperCAmelCase )
        lowerCamelCase__ : Any = self.default_image_processor
        lowerCamelCase__ : Optional[int] = prepare_img()
        lowerCamelCase__ : List[Any] = image_processor(images=__UpperCAmelCase ,return_tensors='''pt''' ).to(__UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            lowerCamelCase__ : Union[str, Any] = model(**__UpperCAmelCase )
        # verify the logits
        lowerCamelCase__ : int = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape ,__UpperCAmelCase )
        lowerCamelCase__ : int = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(__UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__UpperCAmelCase ,atol=1E-4 ) )
@require_torch
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase , unittest.TestCase ):
    """Backbone-mixin test suite for BitBackbone.

    NOTE(review): the three class-attribute names were lost to obfuscation;
    judging by the values they configure the backbone classes under test,
    the config class, and a boolean test flag — restore before running.
    """
    UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
    UpperCAmelCase = BitConfig
    UpperCAmelCase = False
    def lowercase_ ( self :List[str] ) -> int:
        """Create the shared model tester used by the mixin's tests."""
        lowerCamelCase__ : Any = BitModelTester(self )
| 121
| 1
|
"""simple docstring"""
from maths.prime_factors import prime_factors
def lowerCamelCase_ (UpperCamelCase__ : int ):
    """Return -1 if the number has an odd count of prime factors, else 1.

    Raises:
        TypeError: if the input is not an ``int``.
        ValueError: if the input is not a positive integer.
    """
    # Original checked isinstance(number, number), which raises TypeError for
    # every input — the second argument must be the *type* ``int``.
    if not isinstance(UpperCamelCase__ , int ):
        # Original interpolated the undefined name ``number``; use the parameter.
        _UpperCAmelCase : str = F'Input value of [number={UpperCamelCase__}] must be an integer'
        raise TypeError(_UpperCAmelCase )
    if UpperCamelCase__ < 1:
        raise ValueError('''Input must be a positive integer''' )
    return -1 if len(prime_factors(UpperCamelCase__ ) ) % 2 else 1
if __name__ == "__main__":
    import doctest
    # Run any doctests defined in this module.
    doctest.testmod()
| 506
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
_lowerCAmelCase :Tuple = logging.get_logger(__name__)
# Map of canonical checkpoint name -> hosted config URL for DPT models.
_lowerCAmelCase :Union[str, Any] = {
    'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class _UpperCAmelCase ( a ):
    '''Configuration for the DPT model; can wrap a nested BiT backbone config.

    NOTE(review): every __init__ parameter is named ``A`` (machine-obfuscated),
    which is not valid Python (duplicate argument names), and the body
    references the original parameter names (``hidden_size``,
    ``backbone_config``, ...) that no longer bind — the original signature
    must be restored before this can run.
    '''
    a__ ='''dpt'''
    def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1E-12 , A=3_8_4 , A=1_6 , A=3 , A=False , A=True , A=[2, 5, 8, 1_1] , A="project" , A=[4, 2, 1, 0.5] , A=[9_6, 1_9_2, 3_8_4, 7_6_8] , A=2_5_6 , A=-1 , A=False , A=True , A=0.4 , A=2_5_5 , A=0.1 , A=[1, 1_0_2_4, 2_4, 2_4] , A=[0, 1] , A=None , **A , ) -> int:
        super().__init__(**A )
        _UpperCAmelCase : Union[str, Any] = hidden_size
        _UpperCAmelCase : Union[str, Any] = is_hybrid
        # In hybrid mode a convolutional (BiT) backbone feeds the encoder.
        if self.is_hybrid:
            if backbone_config is None:
                # No backbone supplied: fall back to a default BiT config.
                logger.info('''Initializing the config with a `BiT` backbone.''' )
                _UpperCAmelCase : List[Any] = {
                    '''global_padding''': '''same''',
                    '''layer_type''': '''bottleneck''',
                    '''depths''': [3, 4, 9],
                    '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
                    '''embedding_dynamic_padding''': True,
                }
                _UpperCAmelCase : int = BitConfig(**A )
            elif isinstance(A , A ):
                # NOTE(review): both isinstance branches compare a value with
                # itself after obfuscation; originally these distinguished a
                # dict from a PretrainedConfig instance.
                logger.info('''Initializing the config with a `BiT` backbone.''' )
                _UpperCAmelCase : Union[str, Any] = BitConfig(**A )
            elif isinstance(A , A ):
                _UpperCAmelCase : Tuple = backbone_config
            else:
                raise ValueError(
                    f'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
            _UpperCAmelCase : int = backbone_featmap_shape
            _UpperCAmelCase : Dict = neck_ignore_stages
            # Hybrid mode only supports the 'project' readout.
            if readout_type != "project":
                raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
        else:
            _UpperCAmelCase : Tuple = None
            _UpperCAmelCase : Any = None
            _UpperCAmelCase : Any = []
        # Plain ViT encoder hyperparameters.
        _UpperCAmelCase : str = num_hidden_layers
        _UpperCAmelCase : Any = num_attention_heads
        _UpperCAmelCase : Dict = intermediate_size
        _UpperCAmelCase : Optional[Any] = hidden_act
        _UpperCAmelCase : List[str] = hidden_dropout_prob
        _UpperCAmelCase : List[Any] = attention_probs_dropout_prob
        _UpperCAmelCase : Union[str, Any] = initializer_range
        _UpperCAmelCase : int = layer_norm_eps
        _UpperCAmelCase : int = image_size
        _UpperCAmelCase : int = patch_size
        _UpperCAmelCase : str = num_channels
        _UpperCAmelCase : Tuple = qkv_bias
        _UpperCAmelCase : str = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
        _UpperCAmelCase : List[Any] = readout_type
        # Neck / fusion head hyperparameters.
        _UpperCAmelCase : int = reassemble_factors
        _UpperCAmelCase : int = neck_hidden_sizes
        _UpperCAmelCase : Tuple = fusion_hidden_size
        _UpperCAmelCase : Any = head_in_index
        _UpperCAmelCase : Optional[int] = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        _UpperCAmelCase : List[str] = use_auxiliary_head
        _UpperCAmelCase : int = auxiliary_loss_weight
        _UpperCAmelCase : Any = semantic_loss_ignore_index
        _UpperCAmelCase : List[Any] = semantic_classifier_dropout
    def __lowerCAmelCase ( self ) -> str:
        """Serialize this config (and any nested backbone config) to a dict."""
        _UpperCAmelCase : Optional[Any] = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            _UpperCAmelCase : Any = self.backbone_config.to_dict()
        _UpperCAmelCase : List[str] = self.__class__.model_type
        return output
| 506
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Union[str, Any] =logging.get_logger(__name__)
# Map of checkpoint name -> hosted config URL for GPTSAN-japanese.
_UpperCAmelCase : Dict ={
    """tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
        """https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
    ),
}
class snake_case__( UpperCAmelCase__ ):
    '''Configuration for the GPTSAN-japanese model.

    NOTE(review): every __init__ parameter is named ``__lowercase``
    (machine-obfuscated), which is not valid Python (duplicate argument
    names), and the body references original names (``vocab_size`` etc.)
    that no longer bind — restore the original signature before running.
    '''
    SCREAMING_SNAKE_CASE__ : Any = """gptsan-japanese"""
    SCREAMING_SNAKE_CASE__ : str = [
        """past_key_values""",
    ]
    SCREAMING_SNAKE_CASE__ : str = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , __lowercase=3_6_0_0_0 , __lowercase=1_2_8_0 , __lowercase=1_0_2_4 , __lowercase=8_1_9_2 , __lowercase=4_0_9_6 , __lowercase=1_2_8 , __lowercase=1_0 , __lowercase=0 , __lowercase=1_6 , __lowercase=1_6 , __lowercase=1_2_8 , __lowercase=0.0 , __lowercase=1e-5 , __lowercase=False , __lowercase=0.0 , __lowercase="float32" , __lowercase=False , __lowercase=False , __lowercase=False , __lowercase=0.0_02 , __lowercase=False , __lowercase=True , __lowercase=3_5_9_9_8 , __lowercase=3_5_9_9_5 , __lowercase=3_5_9_9_9 , **__lowercase , ) -> Tuple:
        # Model dimensions.
        lowerCAmelCase_ : str = vocab_size
        lowerCAmelCase_ : str = max_position_embeddings
        lowerCAmelCase_ : Optional[int] = d_model
        lowerCAmelCase_ : Any = d_ff
        lowerCAmelCase_ : Tuple = d_ext
        lowerCAmelCase_ : List[str] = d_spout
        # Layer layout: switch (MoE) layers followed by extra layers.
        lowerCAmelCase_ : Optional[Any] = num_switch_layers
        lowerCAmelCase_ : Tuple = num_ext_layers
        lowerCAmelCase_ : List[Any] = num_switch_layers + num_ext_layers
        lowerCAmelCase_ : int = num_heads
        # Mixture-of-experts routing configuration.
        lowerCAmelCase_ : str = num_experts
        lowerCAmelCase_ : Optional[int] = expert_capacity
        lowerCAmelCase_ : int = dropout_rate
        lowerCAmelCase_ : Any = layer_norm_epsilon
        lowerCAmelCase_ : Dict = router_bias
        lowerCAmelCase_ : Optional[int] = router_jitter_noise
        lowerCAmelCase_ : Tuple = router_dtype
        lowerCAmelCase_ : Union[str, Any] = router_ignore_padding_tokens
        # Output / generation flags.
        lowerCAmelCase_ : Tuple = output_hidden_states
        lowerCAmelCase_ : Optional[int] = output_attentions
        lowerCAmelCase_ : int = initializer_factor
        lowerCAmelCase_ : Any = output_router_logits
        lowerCAmelCase_ : int = use_cache
        super().__init__(
            separator_token_id=__lowercase , pad_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase , )
| 619
|
def lowerCAmelCase ( lowerCAmelCase_ = 1_000_000 )-> int:
    """Project Euler 14: return the start below ``lowerCAmelCase_`` that
    produces the longest Collatz chain.

    Fixes the original, in which every local had been collapsed onto the
    single obfuscated name ``lowerCAmelCase_`` (clobbering the limit) while
    later lines still referenced the lost names ``number``/``counters`` —
    restores distinct locals so the memoized search runs.
    """
    largest_number = 1          # best starting value found so far
    pre_counter = 1             # chain length of largest_number
    counters = {1: 1}           # memo: start value -> chain length
    for inputa in range(2 , lowerCAmelCase_ ):
        counter = 0
        number = inputa
        while True:
            # Reuse a previously computed chain length when possible.
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
    # The solver in this module is named ``lowerCAmelCase``; the original
    # guard called the undefined name ``solution``.
    print(lowerCAmelCase(int(input().strip())))
| 619
| 1
|
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __UpperCAmelCase ( a_: str = "isbn/0140328726" ):
    """Fetch the JSON record for an Open Library olid path (e.g. "isbn/...").

    Fixes the original, which referenced the undefined names ``olid`` and
    ``new_olid`` instead of the parameter / its cleaned form.

    Raises:
        ValueError: if the olid does not contain exactly one slash.
    """
    new_olid = a_.strip().strip("/" )  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/" ) != 1:
        msg = f"""{a_} is not a valid Open Library olid"""
        raise ValueError(msg )
    return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json()
def __UpperCAmelCase ( a_: dict ):
    """Map a raw Open Library book record to a human-readable summary dict.

    Fixes the original, which discarded the author-name lookup, the
    first-sentence extraction, and the list-joining results into throwaway
    obfuscated locals instead of storing them back into ``data``, and which
    called ``isinstance`` with the dict parameter as both arguments.
    """
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: a_[key] for key, better_key in desired_keys.items()}
    # Resolve each author reference to its display name via the API.
    data["Authors"] = [
        get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    # Flatten list values (authors, ISBNs) into comma-separated strings.
    for key, value in data.items():
        if isinstance(value, list ):
            data[key] = ", ".join(value )
    return data
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Interactive loop: look up ISBNs on Open Library until the user quits.
    while True:
        __a = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        # A valid ISBN is 10 or 13 digits.
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
            continue
        print(f'\nSearching Open Library for ISBN: {isbn}...\n')
        try:
            # NOTE(review): ``summarize_book``/``get_openlibrary_data`` (and the
            # loop variable ``isbn``) are not defined under these names in this
            # module after obfuscation — restore before running.
            __a = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
            print('\n'.join(f'{key}: {value}' for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f'Sorry, there are no results for ISBN: {isbn}.')
| 494
|
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( a_: str ):
    """Encode a lowercase string as a list of 1-based alphabet positions.

    Fixes the original, which iterated the undefined name ``plain`` and
    called ``ord`` on the whole string instead of each character.
    """
    return [ord(elem ) - 96 for elem in a_]
def __UpperCAmelCase ( a_: list[int] ):
    """Decode a list of 1-based alphabet positions back into a string.

    Fixes the original, which iterated the undefined name ``encoded``
    instead of the parameter.
    """
    return "".join(chr(elem + 96 ) for elem in a_ )
def __UpperCAmelCase ( ):
    """Prompt for text, then print its encoded and decoded forms.

    NOTE(review): ``encode``, ``decode`` and the local result name were lost
    to obfuscation (both helpers above share the name ``__UpperCAmelCase``),
    so ``encode``, ``a_`` and ``decode`` are undefined here — restore the
    original names before running.
    """
    _UpperCAmelCase : List[str] = encode(input("-> " ).strip().lower() )
    print("Encoded: ", a_ )
    print("Decoded:", decode(a_ ) )
if __name__ == "__main__":
    # NOTE(review): ``main`` is also undefined under that name in this module.
    main()
| 494
| 1
|
"""simple docstring"""
def A_ ( _lowerCAmelCase : int = 10_00 ):
    """Return the sum of 2*a*floor((a-1)/2) for a in [3, limit].

    Fixes the original, which referenced the undefined name ``n`` instead
    of its parameter ``_lowerCAmelCase``.
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, _lowerCAmelCase + 1 ) )
if __name__ == "__main__":
    # The solver in this module is named ``A_``; the original guard called
    # the undefined name ``solution``.
    print(A_())
| 710
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case = logging.get_logger(__name__)
class __lowerCamelCase ( a__ ):
    '''Image processor: shortest-edge resize with optional crop-pct center
    crop (for eval sizes < 384), rescale, and normalize.

    NOTE(review): the method parameters are all named ``__UpperCAmelCase``
    (machine-obfuscated), which is not valid Python (duplicate argument
    names); the bodies reference the original names (``size``,
    ``do_resize``, ...) that must be restored before this can run.
    '''
    A_ : Any = ['pixel_values']
    def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BILINEAR , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
        super().__init__(**__UpperCAmelCase )
        _a = size if size is not None else {'''shortest_edge''': 384}
        _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
        _a = do_resize
        _a = size
        # Default value set here for backwards compatibility where the value in config is None
        _a = crop_pct if crop_pct is not None else 224 / 256
        _a = resample
        _a = do_rescale
        _a = rescale_factor
        _a = do_normalize
        _a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        _a = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
        """Resize: crop-pct scheme below 384px, plain warp at 384px or larger."""
        _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
        if "shortest_edge" not in size:
            raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
        _a = size['''shortest_edge''']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            _a = int(shortest_edge / crop_pct )
            _a = get_resize_output_image_size(__UpperCAmelCase , size=__UpperCAmelCase , default_to_square=__UpperCAmelCase )
            _a = resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=__UpperCAmelCase , size=(shortest_edge, shortest_edge) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                __UpperCAmelCase , size=(shortest_edge, shortest_edge) , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
    def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> Optional[int]:
        """Rescale pixel values by the given factor."""
        return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
    def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> np.ndarray:
        """Normalize with the given per-channel mean and std."""
        return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
    def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ) -> PIL.Image.Image:
        """Full preprocessing pipeline: validate args, then resize/rescale/normalize."""
        _a = do_resize if do_resize is not None else self.do_resize
        _a = crop_pct if crop_pct is not None else self.crop_pct
        _a = resample if resample is not None else self.resample
        _a = do_rescale if do_rescale is not None else self.do_rescale
        _a = rescale_factor if rescale_factor is not None else self.rescale_factor
        _a = do_normalize if do_normalize is not None else self.do_normalize
        _a = image_mean if image_mean is not None else self.image_mean
        _a = image_std if image_std is not None else self.image_std
        _a = size if size is not None else self.size
        _a = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
        _a = make_list_of_images(__UpperCAmelCase )
        if not valid_images(__UpperCAmelCase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # NOTE(review): precedence — this reads as (do_resize and size is None)
        # or (resample is None); likely intended do_resize and (size is None
        # or resample is None). Confirm before relying on it.
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('''crop_pct must be specified if size < 384.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        _a = [to_numpy_array(__UpperCAmelCase ) for image in images]
        if do_resize:
            _a = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , crop_pct=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images]
        if do_rescale:
            _a = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images]
        if do_normalize:
            _a = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images]
        _a = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
        _a = {'''pixel_values''': images}
        return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
| 285
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case_ ( __lowercase ,__lowercase ,__lowercase ,unittest.TestCase ):
    """Fast (CPU) tests for AltDiffusionPipeline using tiny dummy components."""
    __lowerCAmelCase = AltDiffusionPipeline
    __lowerCAmelCase = TEXT_TO_IMAGE_PARAMS
    __lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
    __lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
    __lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
    def snake_case_ ( self ):
        """Build tiny UNet/scheduler/VAE/text-encoder components for the pipeline."""
        torch.manual_seed(0 )
        a_ : Dict = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
        a_ : Any = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
        torch.manual_seed(0 )
        a_ : Any = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0 )
        a_ : int = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
        a_ : Dict = CLIPTextModel(a_ )
        a_ : Optional[int] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        a_ : str = 7_7
        a_ : List[Any] = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def snake_case_ ( self , a_ , a_=0 ):
        """Build deterministic pipeline call kwargs for the given device/seed."""
        if str(a_ ).startswith("mps" ):
            a_ : int = torch.manual_seed(a_ )
        else:
            a_ : Optional[Any] = torch.Generator(device=a_ ).manual_seed(a_ )
        a_ : List[Any] = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def snake_case_ ( self ):
        """Loosen the common attention-slicing tolerance for this pipeline."""
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
    def snake_case_ ( self ):
        """Loosen the common batch-consistency tolerance for this pipeline."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def snake_case_ ( self ):
        """End-to-end DDIM run with a Roberta-series text encoder; check a pixel slice."""
        a_ : Dict = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        a_ : Optional[Any] = self.get_dummy_components()
        torch.manual_seed(0 )
        a_ : str = RobertaSeriesConfig(
            hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
        # TODO: remove after fixing the non-deterministic text encoder
        a_ : Optional[int] = RobertaSeriesModelWithTransformation(a_ )
        a_ : Optional[Any] = text_encoder
        a_ : Any = AltDiffusionPipeline(**a_ )
        a_ : Any = alt_pipe.to(a_ )
        alt_pipe.set_progress_bar_config(disable=a_ )
        a_ : int = self.get_dummy_inputs(a_ )
        a_ : Union[str, Any] = """A photo of an astronaut"""
        a_ : Any = alt_pipe(**a_ )
        a_ : List[Any] = output.images
        a_ : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        a_ : int = np.array(
            [0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def snake_case_ ( self ):
        """Same end-to-end check but with a PNDM scheduler."""
        a_ : Any = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        a_ : Any = self.get_dummy_components()
        a_ : List[Any] = PNDMScheduler(skip_prk_steps=a_ )
        torch.manual_seed(0 )
        a_ : int = RobertaSeriesConfig(
            hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
        # TODO: remove after fixing the non-deterministic text encoder
        a_ : Dict = RobertaSeriesModelWithTransformation(a_ )
        a_ : Tuple = text_encoder
        a_ : Optional[Any] = AltDiffusionPipeline(**a_ )
        a_ : Optional[int] = alt_pipe.to(a_ )
        alt_pipe.set_progress_bar_config(disable=a_ )
        a_ : Dict = self.get_dummy_inputs(a_ )
        a_ : List[Any] = alt_pipe(**a_ )
        a_ : Dict = output.images
        a_ : Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        a_ : str = np.array(
            [0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
    """Slow GPU integration tests for the pretrained BAAI/AltDiffusion pipeline."""
    def snake_case_ ( self ):
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def snake_case_ ( self ):
        """Full 20-step run with the default scheduler; check a pixel slice."""
        a_ : List[str] = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=a_ )
        a_ : List[str] = alt_pipe.to(a_ )
        alt_pipe.set_progress_bar_config(disable=a_ )
        a_ : int = """A painting of a squirrel eating a burger"""
        a_ : Tuple = torch.manual_seed(0 )
        a_ : Dict = alt_pipe([prompt] , generator=a_ , guidance_scale=6.0 , num_inference_steps=2_0 , output_type="np" )
        a_ : Optional[Any] = output.images
        a_ : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        a_ : str = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def snake_case_ ( self ):
        """Quick 2-step run with a DDIM scheduler; check a pixel slice."""
        a_ : Union[str, Any] = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
        a_ : Dict = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=a_ , safety_checker=a_ )
        a_ : List[str] = alt_pipe.to(a_ )
        alt_pipe.set_progress_bar_config(disable=a_ )
        a_ : Dict = """A painting of a squirrel eating a burger"""
        a_ : Dict = torch.manual_seed(0 )
        a_ : Any = alt_pipe([prompt] , generator=a_ , num_inference_steps=2 , output_type="numpy" )
        a_ : List[Any] = output.images
        a_ : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        a_ : Union[str, Any] = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 237
|
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _A ( __lowercase ):
    """Tests for ``datasets.Dataset.from_list`` construction behavior.

    NOTE(review): references to ``__magic_name__`` are machine-obfuscated and
    do not bind; restore the original local/parameter names before running.
    """
    def lowercase__ ( self : Any ) -> str:
        """Return four example records with int and str columns."""
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def lowercase__ ( self : str ) -> int:
        """Return the same data as a column-oriented Dataset."""
        __snake_case : Union[str, Any] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
        return Dataset.from_dict(__magic_name__ )
    def lowercase__ ( self : str ) -> List[Any]:
        """from_list preserves column names and row contents."""
        __snake_case : Any = self._create_example_records()
        __snake_case : str = Dataset.from_list(__magic_name__ )
        self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
        for i, r in enumerate(__magic_name__ ):
            self.assertDictEqual(__magic_name__ , example_records[i] )
    def lowercase__ ( self : Optional[Any] ) -> List[Any]:
        """from_list and from_dict of the same data produce the same info."""
        __snake_case : List[Any] = self._create_example_records()
        __snake_case : Dict = Dataset.from_list(__magic_name__ )
        __snake_case : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info , dset_from_dict.info )
    def lowercase__ ( self : str ) -> List[Any]:  # checks what happens with missing columns
        """Missing columns: the first record fixes the schema; absent values become None."""
        __snake_case : Union[str, Any] = [{"""col_1""": 1}, {"""col_2""": """x"""}]
        __snake_case : Optional[int] = Dataset.from_list(__magic_name__ )
        self.assertDictEqual(dset[0] , {"""col_1""": 1} )
        self.assertDictEqual(dset[1] , {"""col_1""": None} )  # NB: first record is used for columns
    def lowercase__ ( self : List[str] ) -> Optional[Any]:  # checks if the type can be inferred from the second record
        """Type inference falls through an empty list to later records."""
        __snake_case : List[Any] = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
        __snake_case : int = Dataset.from_list(__magic_name__ )
        self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
    def lowercase__ ( self : int ) -> Union[str, Any]:
        """from_list([]) yields an empty dataset with no columns."""
        __snake_case : Tuple = Dataset.from_list([] )
        self.assertEqual(len(__magic_name__ ) , 0 )
        self.assertListEqual(dset.column_names , [] )
| 26
| 0
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _A ( lowerCamelCase , lowerCamelCase ):
    """Translate one original EfficientFormer state-dict key to HF naming.

    NOTE(review): both parameters share the obfuscated name
    ``lowerCamelCase`` (invalid Python — duplicate argument names); the body
    references the lost originals (``old_name``, ``num_meta4D_last_stage``,
    ``new_name``, ``match``, ``layer``, ``layer_index``, ``trimmed_name``),
    so the original names must be restored before this can run.
    """
    a__ : Dict = old_name
    # Patch-embedding stem: map conv/batchnorm layer indices to named modules.
    if "patch_embed" in old_name:
        a__ , a__ , a__ : Union[str, Any] = old_name.split("." )
        if layer == "0":
            a__ : Union[str, Any] = old_name.replace("0" , "convolution1" )
        elif layer == "1":
            a__ : Dict = old_name.replace("1" , "batchnorm_before" )
        elif layer == "3":
            a__ : List[str] = old_name.replace("3" , "convolution2" )
        else:
            a__ : Optional[Any] = old_name.replace("4" , "batchnorm_after" )
    # Numbered network blocks: split into intermediate stages vs last stage.
    if "network" in old_name and re.search(r"\d\.\d" , lowerCamelCase ):
        a__ : List[str] = r"\b\d{2}\b"
        if bool(re.search(lowerCamelCase , lowerCamelCase ) ):
            a__ : Optional[int] = re.search(r"\d\.\d\d." , lowerCamelCase ).group()
        else:
            a__ : Any = re.search(r"\d\.\d." , lowerCamelCase ).group()
        if int(match[0] ) < 6:
            a__ : List[Any] = old_name.replace(lowerCamelCase , "" )
            a__ : int = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
            a__ : List[Any] = "intermediate_stages." + trimmed_name
        else:
            a__ : Union[str, Any] = old_name.replace(lowerCamelCase , "" )
            # Blocks past the meta4D count belong to the meta3D (last) stage.
            if int(match[2] ) < num_meta4D_last_stage:
                a__ : Optional[Any] = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
            else:
                a__ : Union[str, Any] = str(int(match[2] ) - num_meta4D_last_stage )
                a__ : str = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
                if "norm1" in old_name:
                    a__ : List[str] = trimmed_name.replace("norm1" , "layernorm1" )
                elif "norm2" in old_name:
                    a__ : Optional[int] = trimmed_name.replace("norm2" , "layernorm2" )
                elif "fc1" in old_name:
                    a__ : List[str] = trimmed_name.replace("fc1" , "linear_in" )
                elif "fc2" in old_name:
                    a__ : Any = trimmed_name.replace("fc2" , "linear_out" )
            a__ : Any = "last_stage." + trimmed_name
    elif "network" in old_name and re.search(r".\d." , lowerCamelCase ):
        a__ : List[str] = old_name.replace("network" , "intermediate_stages" )
    # Remaining renames: norm/fc/proj/head conventions.
    if "fc" in new_name:
        a__ : str = new_name.replace("fc" , "convolution" )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        a__ : str = new_name.replace("norm1" , "batchnorm_before" )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        a__ : Any = new_name.replace("norm2" , "batchnorm_after" )
    if "proj" in new_name:
        a__ : Optional[int] = new_name.replace("proj" , "projection" )
    if "dist_head" in new_name:
        a__ : Tuple = new_name.replace("dist_head" , "distillation_classifier" )
    elif "head" in new_name:
        a__ : Optional[int] = new_name.replace("head" , "classifier" )
    elif "patch_embed" in new_name:
        a__ : Tuple = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        a__ : Union[str, Any] = new_name.replace("norm" , "layernorm" )
        a__ : Optional[int] = "efficientformer." + new_name
    else:
        a__ : List[Any] = "efficientformer.encoder." + new_name
    return new_name
def _A ( lowerCamelCase , lowerCamelCase ):
    """Rename every key of a checkpoint state dict in place and return it.

    NOTE(review): both parameters share the obfuscated name ``lowerCamelCase``
    (invalid Python) and the body references the lost names ``checkpoint``
    and ``val`` — restore the original names (state dict, rename fn) before
    running.
    """
    for key in checkpoint.copy().keys():
        a__ : Optional[Any] = checkpoint.pop(lowerCamelCase )
        a__ : Dict = val
    return checkpoint
def _A ( ):
    """Download and return the standard COCO cats test image.

    Fixes the original, which passed the undefined name ``lowerCamelCase``
    to ``requests.get`` and returned the undefined name ``image``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
def _A(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    """Convert an original EfficientFormer checkpoint to the HF format.

    Loads the raw ``state_dict`` from *checkpoint_path*, re-keys it, loads it
    into an ``EfficientFormerForImageClassificationWithTeacher`` built from
    *efficientformer_config_file*, sanity-checks the image processor against
    the original torchvision pipeline and the logits against reference
    values, then saves (and optionally pushes) the model and processor.

    Args:
        checkpoint_path: path to the original ``.pth`` checkpoint.
        efficientformer_config_file: JSON config file for the HF model.
        pytorch_dump_path: output directory for the converted artifacts.
        push_to_hub: when truthy, also push model + processor to the Hub.

    Raises:
        ValueError: if the checkpoint name is not an l1/l3/l7 variant.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    # e.g. "efficientformer_l1_300d.pth" -> "efficientformer_l1"
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    # NOTE(review): upstream calls this attribute ``num_meta3d_blocks``;
    # the mangled config class in this file spells it ``num_metaad_blocks``.
    num_meta4D_last_stage = config.depths[-1] - config.num_metaad_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original torchvision processing pipeline; must match the HF processor
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            # ImageNet mean/std (timm's IMAGENET_DEFAULT_MEAN / _STD)
            Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1E-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        # NOTE(review): only the shape is checked for l7 (no allclose) —
        # the same asymmetry exists upstream.
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"""
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")
    processor.save_pretrained(pytorch_dump_path)
    print(f"""Processor successfuly saved at {pytorch_dump_path}""")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"""Bearnardd/{pytorch_dump_path}""",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the checkpoint conversion.
    SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
    # The calls below refer to the parser as ``parser``; the original never
    # bound that name (and ``args`` below), which raised NameError.
    parser = SCREAMING_SNAKE_CASE__
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path",
        default=None,
        type=str,
        required=True,
        help="Path to EfficientFormer pytorch checkpoint.",
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for EfficientFormer model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )
    # Pushing is opt-out: default True, disabled via --no-push_to_hub.
    parser.set_defaults(push_to_hub=True)
    SCREAMING_SNAKE_CASE__ = parser.parse_args()
    args = SCREAMING_SNAKE_CASE__
    convert_efficientformer_checkpoint(
        checkpoint_path=args.pytorch_model_path,
        efficientformer_config_file=args.config_file,
        pytorch_dump_path=args.pytorch_dump_path,
        push_to_hub=args.push_to_hub,
    )
| 629
|
from __future__ import annotations
from random import random
class __lowerCAmelCase :
def __init__( self , snake_case = None ) -> Any:
"""simple docstring"""
a__ : Optional[int] = value
a__ : Tuple = random()
a__ : Node | None = None
a__ : Node | None = None
def __repr__( self ) -> str:
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return F"""'{self.value}: {self.prior:.5}'"""
else:
return pformat(
{F"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self ) -> str:
"""simple docstring"""
a__ : List[Any] = str(self.value ) + " "
a__ : List[str] = str(self.left or "" )
a__ : Tuple = str(self.right or "" )
return value + left + right
def _A ( lowerCamelCase , lowerCamelCase ):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
a__ , a__ : Dict = split(root.left , lowerCamelCase )
return left, root
else:
a__ , a__ : int = split(root.right , lowerCamelCase )
return root, right
def _A ( lowerCamelCase , lowerCamelCase ):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
a__ : List[Any] = merge(left.right , lowerCamelCase )
return left
else:
a__ : int = merge(lowerCamelCase , right.left )
return right
def _A(root, value):
    """Insert *value* into the treap *root* and return the new root.

    Splits the tree around *value*, wraps the value in a fresh ``Node`` and
    merges the three pieces back together. The original discarded the split
    results into throwaway names, so the helpers received the raw arguments.

    NOTE(review): ``Node``/``split``/``merge`` are mangled to other names in
    this file — confirm these references resolve.
    """
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def _A(root, value):
    """Erase every node holding *value*; return the new root.

    First split keeps values <= value-1 (i.e. < value) on the left; the
    second split peels off the nodes equal to *value*, which are dropped by
    merging only the outer pieces. The original discarded both split results.

    NOTE(review): ``split``/``merge`` are mangled to other names in this
    file — confirm these references resolve.
    """
    left, right = split(root, value - 1)
    _erased, right = split(right, value)
    return merge(left, right)
def _A ( lowerCamelCase ):
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def _A ( lowerCamelCase , lowerCamelCase ):
for arg in args.split():
if arg[0] == "+":
a__ : int = insert(lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
a__ : Union[str, Any] = erase(lowerCamelCase , int(arg[1:] ) )
else:
print("Unknown command" )
return root
def _A():
    """Tiny interactive REPL: read commands, apply them, echo the tree.

    Loops until the user types ``q``. The original read input into a
    throwaway local and then compared/used the unbound names ``args`` and
    ``lowerCamelCase``, raising NameError on first use.

    NOTE(review): ``interact_treap`` is mangled to another name in this
    file — confirm the reference resolves.
    """
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. ")
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")
if __name__ == "__main__":
    # Run the module's doctests before starting the interactive session.
    import doctest
    doctest.testmod()
    # NOTE(review): ``main`` is not defined under that name in this file
    # (the REPL entry point above is mangled) — confirm this call resolves.
    main()
| 629
| 1
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def UpperCamelCase():
    """Entry point for the ``diffusers-cli`` command-line tool.

    Builds the argument parser, registers the available sub-commands,
    dispatches to the selected command's service object and runs it.
    Prints help and exits with status 1 when no sub-command was given.

    The original referenced the unbound name ``snake_case__`` everywhere a
    real argument belonged, raising NameError on first use.
    """
    parser = ArgumentParser("""Diffusers CLI tool""", usage="""diffusers-cli <command> [<args>]""")
    commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, """func"""):
        parser.print_help()
        exit(1)

    # Run: each sub-command's ``func`` factory returns a service object.
    service = args.func(args)
    service.run()
if __name__ == "__main__":
    # NOTE(review): the CLI entry point above is not actually named ``main``
    # in this file (its name is mangled) — confirm this call resolves.
    main()
| 455
|
# Universal gas constant R in J/(mol*K); the function body refers to it by
# this name (the original bound the value only to a mangled alias, so the
# reference below was unresolved).
UNIVERSAL_GAS_CONSTANT = 8.3144598
lowerCamelCase__ = UNIVERSAL_GAS_CONSTANT  # preserve the original module-level alias


def UpperCamelCase(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed (m/s) of a gas molecule.

    Uses v_rms = sqrt(3*R*T / M) with R the universal gas constant, T the
    temperature in kelvin and M the molar mass in kg/mol. The original
    signature named both parameters identically (a SyntaxError) even though
    the body already used ``temperature`` and ``molar_mass``.

    Raises:
        Exception: if *temperature* is negative or *molar_mass* is not positive.
    """
    if temperature < 0:
        raise Exception("""Temperature cannot be less than 0 K""")
    if molar_mass <= 0:
        raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: nitrogen gas at 300 K. The original assigned both values to a
    # single mangled alias and then read the unbound names ``temperature``,
    # ``molar_mass`` and ``vrms``.
    temperature = 300
    molar_mass = 28
    # NOTE(review): the rms-speed function defined above is the only matching
    # routine in this file; the original called an undefined
    # ``rms_speed_of_molecule``.
    vrms = UpperCamelCase(temperature, molar_mass)
    print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
| 455
| 1
|
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCamelCase :
    """Builder of tiny XLM configs and random inputs for the tests below.

    Each ``create_and_check_*``-style method instantiates one XLM head on a
    small random batch and asserts the output shapes.

    NOTE(review): this block is heavily mangled — every method parameter is
    named ``__a`` (duplicate parameter names are a SyntaxError) and the
    bodies pass the undefined name ``A_`` where the real arguments and the
    target device belong; assignments go to a throwaway ``__lowerCAmelCase``
    instead of the names later read. Code is left exactly as found; only
    comments were added.
    """

    def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=True , __a=False , __a=False , __a=False , __a=2 , __a=99 , __a=0 , __a=32 , __a=5 , __a=4 , __a=0.1 , __a=0.1 , __a=5_12 , __a=2 , __a=0.0_2 , __a=2 , __a=4 , __a="last" , __a=True , __a=None , __a=0 , ):
        # Stores every knob of the miniature test configuration. The RHS
        # names below (parent, batch_size, ...) are presumably the intended
        # parameter names, but they are unbound here — TODO confirm upstream.
        __lowerCAmelCase = parent
        __lowerCAmelCase = batch_size
        __lowerCAmelCase = seq_length
        __lowerCAmelCase = is_training
        __lowerCAmelCase = use_input_lengths
        __lowerCAmelCase = use_token_type_ids
        __lowerCAmelCase = use_labels
        __lowerCAmelCase = gelu_activation
        __lowerCAmelCase = sinusoidal_embeddings
        __lowerCAmelCase = causal
        __lowerCAmelCase = asm
        __lowerCAmelCase = n_langs
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = n_special
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = type_sequence_label_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = num_labels
        __lowerCAmelCase = num_choices
        __lowerCAmelCase = summary_type
        __lowerCAmelCase = use_proj
        __lowerCAmelCase = scope
        __lowerCAmelCase = bos_token_id

    # Builds random id/mask/length/label tensors and a fresh config; the
    # names in the return tuple are unbound here (mangled assignments above).
    def snake_case ( self ):
        __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        __lowerCAmelCase = None
        if self.use_input_lengths:
            __lowerCAmelCase = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            ) # small variation of seq_length
        __lowerCAmelCase = None
        if self.use_token_type_ids:
            __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        if self.use_labels:
            __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __lowerCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
            __lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
        __lowerCAmelCase = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    # Returns the miniature XLMConfig assembled from the instance knobs.
    def snake_case ( self ):
        return XLMConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )

    # Checks the base XLMModel: last_hidden_state shape only.
    def snake_case ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ):
        __lowerCAmelCase = XLMModel(config=A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase = model(A_ , lengths=A_ , langs=A_ )
        __lowerCAmelCase = model(A_ , langs=A_ )
        __lowerCAmelCase = model(A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # Checks the LM head: scalar loss and (batch, seq, vocab) logits.
    def snake_case ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ):
        __lowerCAmelCase = XLMWithLMHeadModel(A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase = model(A_ , token_type_ids=A_ , labels=A_ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # Checks the simple QA head: start/end logits per position.
    def snake_case ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ):
        __lowerCAmelCase = XLMForQuestionAnsweringSimple(A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase = model(A_ )
        __lowerCAmelCase = model(A_ , start_positions=A_ , end_positions=A_ )
        __lowerCAmelCase = outputs
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # Checks the beam-search QA head (SQuAD-style top-k outputs).
    def snake_case ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ):
        __lowerCAmelCase = XLMForQuestionAnswering(A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase = model(A_ )
        __lowerCAmelCase = model(
            A_ , start_positions=A_ , end_positions=A_ , cls_index=A_ , is_impossible=A_ , p_mask=A_ , )
        __lowerCAmelCase = model(
            A_ , start_positions=A_ , end_positions=A_ , cls_index=A_ , is_impossible=A_ , )
        ((__lowerCAmelCase ) , ) = result_with_labels.to_tuple()
        __lowerCAmelCase = model(A_ , start_positions=A_ , end_positions=A_ )
        ((__lowerCAmelCase ) , ) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )

    # Checks the sequence-classification head.
    def snake_case ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ):
        __lowerCAmelCase = XLMForSequenceClassification(A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase = model(A_ )
        __lowerCAmelCase = model(A_ , labels=A_ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    # Checks the token-classification head.
    def snake_case ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ):
        __lowerCAmelCase = self.num_labels
        __lowerCAmelCase = XLMForTokenClassification(A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase = model(A_ , attention_mask=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # Checks the multiple-choice head: inputs are tiled across choices.
    def snake_case ( self , __a , __a , __a , __a , __a , __a , __a , __a , __a , ):
        __lowerCAmelCase = self.num_choices
        __lowerCAmelCase = XLMForMultipleChoice(config=A_ )
        model.to(A_ )
        model.eval()
        __lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __lowerCAmelCase = model(
            A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    # Repackages prepare_config_and_inputs() into (config, inputs_dict) for
    # the common ModelTester machinery; the unpack targets below are mangled.
    def snake_case ( self ):
        __lowerCAmelCase = self.prepare_config_and_inputs()
        (
            (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) ,
        ) = config_and_inputs
        __lowerCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class _UpperCamelCase ( _lowercase ,_lowercase ,_lowercase ,unittest.TestCase ):
    """Unit-test suite wiring the XLM model tester into the common mixins.

    NOTE(review): mangled like the tester above — duplicate ``__a``
    parameters (a SyntaxError), undefined ``A_`` where real arguments
    belong, and the mixin base classes are all renamed ``_lowercase``.
    Code is left exactly as found; only comments were added.
    """

    # all_model_classes: every XLM head exercised by the common tests.
    __UpperCAmelCase : Dict =(
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    # all_generative_model_classes: only the LM head supports generation.
    __UpperCAmelCase : Dict =(
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    ) # TODO (PVP): Check other models whether language generation is also applicable
    # pipeline_model_mapping: pipeline task name -> model class.
    __UpperCAmelCase : Optional[Any] =(
        {
            '''feature-extraction''': XLMModel,
            '''fill-mask''': XLMWithLMHeadModel,
            '''question-answering''': XLMForQuestionAnsweringSimple,
            '''text-classification''': XLMForSequenceClassification,
            '''text-generation''': XLMWithLMHeadModel,
            '''token-classification''': XLMForTokenClassification,
            '''zero-shot''': XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    # Skips QA pipeline tests for slow tokenizers; the condition reads names
    # (pipeline_test_casse_name, tokenizer_name) that are unbound here.
    def snake_case ( self , __a , __a , __a , __a , __a ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    # Adds dummy cls_index / is_impossible labels for the beam-search QA head.
    def snake_case ( self , __a , __a , __a=False ):
        __lowerCAmelCase = super()._prepare_for_class(A_ , A_ , return_labels=A_ )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                __lowerCAmelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=A_ )
                __lowerCAmelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=A_ )
        return inputs_dict

    # setUp: create the model tester and the config tester.
    def snake_case ( self ):
        __lowerCAmelCase = XLMModelTester(self )
        __lowerCAmelCase = ConfigTester(self , config_class=A_ , emb_dim=37 )

    # Runs the shared configuration sanity checks.
    def snake_case ( self ):
        self.config_tester.run_common_tests()

    # One test per head: build inputs and delegate to the tester's checker.
    def snake_case ( self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*A_ )

    def snake_case ( self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*A_ )

    def snake_case ( self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*A_ )

    def snake_case ( self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*A_ )

    def snake_case ( self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*A_ )

    def snake_case ( self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*A_ )

    def snake_case ( self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ )

    # Shape check for per-step attention tuples returned by generate().
    def snake_case ( self , __a , __a , __a , __a , __a , __a=False , __a=1 ):
        self.assertIsInstance(A_ , A_ )
        self.assertListEqual(
            [isinstance(A_ , A_ ) for iter_attentions in attentions] , [True] * len(A_ ) )
        self.assertEqual(len(A_ ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(A_ ):
            # adds PAD dummy token
            __lowerCAmelCase = min_length + idx + 1
            __lowerCAmelCase = min_length + idx + 1
            __lowerCAmelCase = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(A_ ) )

    # Shape check for per-step hidden-state tuples returned by generate().
    def snake_case ( self , __a , __a , __a , __a , __a , __a=False , __a=1 ):
        self.assertIsInstance(A_ , A_ )
        self.assertListEqual(
            [isinstance(A_ , A_ ) for iter_hidden_states in hidden_states] , [True] * len(A_ ) , )
        self.assertEqual(len(A_ ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(A_ ):
            # adds PAD dummy token
            __lowerCAmelCase = min_length + idx + 1
            __lowerCAmelCase = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(A_ ) , )
        pass

    # Smoke test: every published XLM checkpoint can be loaded.
    @slow
    def snake_case ( self ):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCAmelCase = XLMModel.from_pretrained(A_ )
            self.assertIsNotNone(A_ )
@require_torch
class _UpperCamelCase(unittest.TestCase):
    """Slow integration test for the original ``xlm-mlm-en-2048`` LM head.

    The original body passed the undefined name ``A_`` everywhere the real
    arguments, the target device and the expected ids belonged.
    """

    @slow
    def snake_case(self):
        """Greedy-generate from "the president" and check the (degenerate) output."""
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 4_47]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
            14,
            4_47,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        # Greedy decoding (do_sample=False), as in the upstream test.
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
| 700
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the X-CLIP sub-package. The original rebound the
# module alias ``A`` to a plain list (losing the mapping), never defined the
# ``_import_structure`` name that the _LazyModule call reads, and evaluated
# unresolved ``Union``/``Optional`` annotations at module level.
A = {
    "configuration_x_clip": [
        "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XCLIPConfig",
        "XCLIPTextConfig",
        "XCLIPVisionConfig",
    ],
    "processing_x_clip": ["XCLIPProcessor"],
}
_import_structure = A  # conventional name expected by _LazyModule below

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: expose the modeling symbols as well.
    _import_structure["modeling_x_clip"] = [
        "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XCLIPModel",
        "XCLIPPreTrainedModel",
        "XCLIPTextModel",
        "XCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_x_clip import (
        XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XCLIPConfig,
        XCLIPTextConfig,
        XCLIPVisionConfig,
    )
    from .processing_x_clip import XCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_x_clip import (
            XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            XCLIPModel,
            XCLIPPreTrainedModel,
            XCLIPTextModel,
            XCLIPVisionModel,
        )
else:
    import sys

    A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    # Install the lazy module so attribute access triggers on-demand imports
    # (the otherwise-dead ``import sys`` shows this was the intent).
    sys.modules[__name__] = A
| 282
| 0
|
def __UpperCamelCase ( lowerCAmelCase__ : str ):
if n_term == "":
return []
__a : list = []
for temp in range(int(lowerCAmelCase__ ) ):
series.append(f"1/{temp + 1}" if series else '''1''' )
return series
if __name__ == "__main__":
    lowercase__ = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    # NOTE(review): the series builder defined above is the only matching
    # function in this file; the original called an undefined
    # ``harmonic_series(nth_term)`` with an unbound argument.
    print(__UpperCamelCase(lowercase__))
| 521
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the Funnel sub-package. The original rebound
# ``lowercase__`` to plain lists (losing the mapping built first) and never
# defined the ``_import_structure`` name that the _LazyModule call reads.
lowercase__ = {
    'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
    'convert_funnel_original_tf_checkpoint_to_pytorch': [],
    'tokenization_funnel': ['FunnelTokenizer'],
}
_import_structure = lowercase__  # conventional name expected by _LazyModule below

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_funnel_fast'] = ['FunnelTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_funnel'] = [
        'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FunnelBaseModel',
        'FunnelForMaskedLM',
        'FunnelForMultipleChoice',
        'FunnelForPreTraining',
        'FunnelForQuestionAnswering',
        'FunnelForSequenceClassification',
        'FunnelForTokenClassification',
        'FunnelModel',
        'FunnelPreTrainedModel',
        'load_tf_weights_in_funnel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_funnel'] = [
        'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFFunnelBaseModel',
        'TFFunnelForMaskedLM',
        'TFFunnelForMultipleChoice',
        'TFFunnelForPreTraining',
        'TFFunnelForQuestionAnswering',
        'TFFunnelForSequenceClassification',
        'TFFunnelForTokenClassification',
        'TFFunnelModel',
        'TFFunnelPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )
else:
    import sys

    lowercase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
    # Install the lazy module so attribute access triggers on-demand imports.
    sys.modules[__name__] = lowercase__
| 521
| 1
|
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
# The metric decorator and class below read _DESCRIPTION, _KWARGS_DESCRIPTION
# and _CITATION; the original bound all three strings, in turn, to the single
# mangled name ``__lowercase``, leaving those names undefined.
_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""

_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.

Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}

Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67

Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35

Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""

_CITATION = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""

# Preserve the original module-level alias (it previously ended up bound to
# the citation string after three successive reassignments).
__lowercase = _CITATION
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _A(datasets.Metric):
    """F1 metric backed by ``sklearn.metrics.f1_score``.

    The original defined BOTH methods under the same name (the second
    silently shadowed the first) and gave the compute method six parameters
    all named identically — a SyntaxError. ``datasets.Metric`` dispatches to
    ``_info`` and ``_compute``, so those hook names are restored.
    """

    def _info(self):
        """Declare citation, docs and the (config-dependent) input feature schema."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """Compute F1 and return ``{"f1": float-or-array}``.

        ``fa_score`` is this file's (mangled) import of sklearn's f1 scorer;
        sklearn expects ``(y_true, y_pred)``, i.e. references first.
        """
        score = fa_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 135
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the WavLM sub-package. The original rebound
# ``__lowercase`` to a plain list (losing the mapping) and never defined the
# ``_import_structure`` name that the _LazyModule call reads.
__lowercase = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
_import_structure = __lowercase  # conventional name expected by _LazyModule below

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    __lowercase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    # Install the lazy module so attribute access triggers on-demand imports.
    sys.modules[__name__] = __lowercase
| 135
| 1
|
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return ``(x, y)`` such that ``a*x + b*y == gcd(a, b)``.

    Restored from a mangled version whose duplicate parameter names were a
    ``SyntaxError`` and whose recursive call referenced an undefined name.

    >>> extended_euclid(10, 6)
    (-1, 2)
    >>> extended_euclid(7, 5)
    (-2, 3)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Solve ``x ≡ r1 (mod n1)`` and ``x ≡ r2 (mod n2)`` for coprime n1, n2.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    # Normalize into [0, m).
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of ``a`` modulo ``n``.

    >>> invert_modulo(2, 7)
    4
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as :func:`chinese_remainder_theorem`, via modular inverses.

    >>> chinese_remainder_theorem2(5, 1, 7, 3)
    31
    """
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name='chinese_remainder_theorem', verbose=True)
    testmod(name='chinese_remainder_theorem2', verbose=True)
    testmod(name='invert_modulo', verbose=True)
    testmod(name='extended_euclid', verbose=True)
| 3
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Tokenizer aliases exposed eagerly (they come from the T5 module or a dummy
# placeholder). The original bound both to throwaway mangled names and then
# referenced the undefined ``MTaTokenizer``/``MTaTokenizerFast`` below.
if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

MTaTokenizer = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

MTaTokenizerFast = TaTokenizerFast

# Import structure for the lazy module; the original never defined
# ``_import_structure`` and overwrote a single mangled name per branch.
_import_structure = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        'MT5EncoderModel',
        'MT5ForConditionalGeneration',
        'MT5ForQuestionAnswering',
        'MT5Model',
        'MT5PreTrainedModel',
        'MT5Stack',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model']

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model']

if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
    import sys

    # Install the lazy proxy; the original assigned it to a throwaway name,
    # so the real (empty) module was left in sys.modules.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()['__file__'],
        _import_structure,
        extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast},
        module_spec=__spec__,
    )
| 3
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Import structure consumed by `_LazyModule`. The original rebound one mangled
# name per branch (losing every list) and then referenced an undefined
# ``_import_structure``, so the lazy module could never be built.
_import_structure = {
    '''configuration_blenderbot''': [
        '''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''BlenderbotConfig''',
        '''BlenderbotOnnxConfig''',
    ],
    '''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ['''BlenderbotTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        '''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BlenderbotForCausalLM''',
        '''BlenderbotForConditionalGeneration''',
        '''BlenderbotModel''',
        '''BlenderbotPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        '''TFBlenderbotForConditionalGeneration''',
        '''TFBlenderbotModel''',
        '''TFBlenderbotPreTrainedModel''',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        '''FlaxBlenderbotForConditionalGeneration''',
        '''FlaxBlenderbotModel''',
        '''FlaxBlenderbotPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy in place of this module; the original assigned it
    # to a throwaway name, leaving the real (empty) module in sys.modules.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 647
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters the DPT image-processor tests need.

    Restored from a mangled version in which every ``__init__`` parameter
    shared one name (a ``SyntaxError``) and all values were bound to a local
    instead of ``self``, so ``prepare_image_processor_dict`` could never
    work. The name matches the ``DPTImageProcessingTester(self)`` reference
    in the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ) -> None:
        # Avoid mutable default arguments; these mirror the original defaults.
        image_mean = [0.5, 0.5, 0.5] if image_mean is None else image_mean
        image_std = [0.5, 0.5, 0.5] if image_std is None else image_std
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate an image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ``DPTImageProcessor``.

    Restored from a mangled version that shadowed the tester class (both were
    named ``lowercase``) and whose mixin base class name was undefined; the
    mixin comes from the ``test_image_processing_common`` import above.
    """

    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 647
| 1
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """Entry point for the ``accelerate`` command-line tool.

    Builds the top-level argument parser, registers every sub-command,
    parses ``sys.argv`` and dispatches to the selected sub-command handler.
    Renamed from a mangled identifier so the ``main()`` call in the
    ``__main__`` guard resolves.
    """
    # allow_abbrev=False so abbreviated flags are not swallowed by the
    # top-level parser before reaching the sub-commands (the original passed
    # an undefined variable here).
    parser = ArgumentParser('Accelerate CLI tool', usage='accelerate <command> [<args>]', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='accelerate command helpers')

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        # No sub-command selected: show usage and exit with an error status.
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 469
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of canonical checkpoint name -> hosted config URL. The original bound
# both the logger and this dict to the same mangled name, so the logger was
# immediately overwritten.
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    """Configuration class for LXMERT models.

    Restored from a mangled version whose base class name (``__magic_name__``)
    was undefined and whose ``__init__`` declared every parameter under the
    same name (a ``SyntaxError``). ``PretrainedConfig`` is the base class
    imported at the top of this file; parameter names follow the assignment
    order of the original body.
    """

    model_type = '''lxmert'''
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9_500,
        num_object_labels=1_600,
        num_attr_labels=400,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2_048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # LXMERT has three encoder stacks (vision / cross-modal / language).
        self.num_hidden_layers = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
        super().__init__(**kwargs)
| 469
| 1
|
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
    """patch_submodule must patch every alias of os.path.join and restore them.

    Restored from a mangled version that passed an undefined name ``a__`` to
    ``patch_submodule`` instead of the ``mock`` string it then asserted on.
    """
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    """Patching a builtin (``open``) referenced in the module's globals.

    Restored from a mangled version that bound the mock string to a throwaway
    name and passed an undefined ``a__`` to ``patch_submodule``.
    """
    assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    """Patching an attribute the module never imported must be a silent no-op."""
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    """Builtins not present in the module's globals still get patched.

    Restored from a mangled version where the ``getattr`` default and the
    ``patch_submodule`` argument were the undefined name ``a__``.
    """
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    # once the patch is over, the builtin resolves again
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    """``patch_submodule`` can also be driven manually via start()/stop().

    Restored from a mangled version where both the mock string and the patch
    object were bound to the same throwaway name, leaving ``patch`` undefined.
    """
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    """Nested patches of attributes of the same module compose in any order.

    Restored from a mangled version where the three mock strings were bound
    to one throwaway name and ``patch_submodule`` received undefined ``a__``.
    """
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    """Patching a module or attribute that doesn't exist is a no-op, not an error."""
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 426
|
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Compute ROUGE between two line-aligned text files.

    Restored from a mangled version whose duplicate parameter names were a
    ``SyntaxError`` and whose ``__main__`` guard referenced the (correct)
    name ``calculate_rouge_path``, which no longer existed.

    Args:
        pred_path: file with one predicted summary per line.
        tgt_path: file with one reference summary per line (truncated to
            the number of predictions).
        save_path: optional JSON path to write the metrics to.
        **kwargs: forwarded to ``calculate_rouge``.
    """
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        # NOTE(review): the original indent argument was mangled away;
        # indent=None matches save_json's plainest output — confirm upstream.
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
| 426
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece marks word-initial pieces with this character.
SPIECE_UNDERLINE = '''▁'''

# These five constants were all bound to the same mangled name in the
# original, so each assignment destroyed the previous one and the tokenizer
# class below referenced names that never existed.
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
        '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
        '''xlm-roberta-large-finetuned-conll02-dutch''': (
            '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
        ),
        '''xlm-roberta-large-finetuned-conll02-spanish''': (
            '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
        ),
        '''xlm-roberta-large-finetuned-conll03-english''': (
            '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
        ),
        '''xlm-roberta-large-finetuned-conll03-german''': (
            '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''xlm-roberta-base''': 5_1_2,
    '''xlm-roberta-large''': 5_1_2,
    '''xlm-roberta-large-finetuned-conll02-dutch''': 5_1_2,
    '''xlm-roberta-large-finetuned-conll02-spanish''': 5_1_2,
    '''xlm-roberta-large-finetuned-conll03-english''': 5_1_2,
    '''xlm-roberta-large-finetuned-conll03-german''': 5_1_2,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLM-RoBERTa.

    Restored from a mangled version whose base class name was undefined (the
    intended base, ``PreTrainedTokenizer``, is imported above) and whose
    methods all shared the name ``a_`` — so every override except the last
    was shadowed and none of the tokenizer hooks were actually overridden.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. it absorbs the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self) -> Any:
        # SentencePieceProcessor is not picklable: serialize the proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add <s> ... </s> (and </s></s> between pairs) around the token ids."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """XLM-RoBERTa does not use token type ids: always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (int) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Join pieces and turn the SentencePiece underline back into spaces."""
        out_string = """""".join(tokens).replace(SPIECE_UNDERLINE, """ """).strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 176
|
def infix_2_postfix(infix):
    """Convert an infix expression (string of single-char tokens) to postfix.

    Prints a step-by-step conversion table as a side effect and returns the
    postfix string. Restored from a mangled version in which every local was
    bound to a single name; additionally guards against '(' inside the
    priority comparison so parenthesised input no longer raises KeyError.
    """
    stack = []
    post_fix = []
    priority = {
        """^""": 3,
        """*""": 2,
        """/""": 2,
        """%""": 2,
        """+""": 1,
        """-""": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        """Symbol""".center(8), """Stack""".center(print_width), """Postfix""".center(print_width), sep=""" | """,
    )
    print("""-""" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:
                # while priority of x is not > priority of element in the stack,
                # never popping past an open parenthesis
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8), ("""""".join(stack)).ljust(print_width), ("""""".join(post_fix)).ljust(print_width), sep=""" | """,
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            """ """.center(8), ("""""".join(stack)).ljust(print_width), ("""""".join(post_fix)).ljust(print_width), sep=""" | """,
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    """Convert infix to prefix: reverse, swap parentheses, postfix, reverse."""
    reversed_infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = """)"""  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = """("""  # change ")" to "("

    return (infix_2_postfix("""""".join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
if __name__ == "__main__":
    # Restored: the original bound the input to a mangled throwaway name and
    # then referenced the undefined name ``Infix``.
    Infix = input('''\nEnter an Infix Equation = ''')  # Input an Infix equation
    Infix = ''''''.join(Infix.split())  # Remove spaces from the input
    print('''\n\t''', Infix, '''(Infix) -> ''', infix_2_prefix(Infix), '''(Prefix)''')
| 176
| 1
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class snake_case ( unittest.TestCase ):
    """Unit tests for ``knapsack.knapsack``.

    NOTE(review): this file appears machine-mangled. All three test methods
    share the name ``snake_case`` (the later defs shadow the earlier ones)
    and none is named ``test_*``, so ``unittest`` discovers nothing. Each
    assertion also passes ``UpperCamelCase`` — a name that is never defined —
    instead of the freshly computed locals (capacity, values, weights,
    count), so running any method raises NameError. Restoring distinct
    ``test_*`` names and the real argument names would make the suite
    effective. The intent of each method is noted below.
    """

    def snake_case ( self ):
        """Intended: zero capacity or worthless items yield profit 0."""
        lowerCamelCase_ = 0
        lowerCamelCase_ = [0]
        lowerCamelCase_ = [0]
        lowerCamelCase_ = len(UpperCamelCase )
        self.assertEqual(k.knapsack(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , 0 )

        lowerCamelCase_ = [60]
        lowerCamelCase_ = [10]
        lowerCamelCase_ = len(UpperCamelCase )
        self.assertEqual(k.knapsack(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , 0 )

    def snake_case ( self ):
        """Intended: capacity 3, values [1, 2, 3], weights [3, 2, 1] -> profit 5."""
        lowerCamelCase_ = 3
        lowerCamelCase_ = [1, 2, 3]
        lowerCamelCase_ = [3, 2, 1]
        lowerCamelCase_ = len(UpperCamelCase )
        self.assertEqual(k.knapsack(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , 5 )

    def snake_case ( self ):
        """Intended: classic example — capacity 50, values [60, 100, 120] -> profit 220."""
        lowerCamelCase_ = 50
        lowerCamelCase_ = [60, 100, 120]
        lowerCamelCase_ = [10, 20, 30]
        lowerCamelCase_ = len(UpperCamelCase )
        self.assertEqual(k.knapsack(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) , 220 )
if __name__ == "__main__":
    # Standard unittest entry point (only ``test_*`` methods are discovered).
    unittest.main()
| 445
|
'''simple docstring'''
def __snake_case ( UpperCAmelCase_ : int = 1000 ) -> int:
    """Return the sum of 2 * a * ((a - 1) // 2) for every a in 3..UpperCAmelCase_.

    The original body read an undefined name ``n``; it now uses the
    ``UpperCAmelCase_`` limit parameter (default 1000) as intended.
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , UpperCAmelCase_ + 1 ) )
if __name__ == "__main__":
    # NOTE(review): the solver above is named __snake_case in this file;
    # the original call site referenced the undefined name ``solution``.
    print(__snake_case())
| 445
| 1
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
# Fixture locations used by the tests below.
snake_case : int = get_tests_dir('''fixtures''')
snake_case : str = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
snake_case : Optional[int] = get_tests_dir('''fixtures/dummy-config.json''')
# NOTE(review): all three constants are bound to the same name ``snake_case``,
# so the later assignments overwrite the earlier ones — upstream used three
# distinct constants (fixtures dir, extractor-config path, dummy-config path).
class snake_case_ (unittest.TestCase ):
    """Tests for AutoFeatureExtractor loading and registration behaviour.

    NOTE(review): this file appears machine-mangled. Every test method is
    named ``lowerCamelCase__`` (later defs shadow earlier ones) and none is
    named ``test_*``, so ``unittest`` discovers nothing. The methods also use
    ``__snake_case`` — a name that is never defined — where the original code
    passed concrete values (fixture paths, expected classes, booleans), so
    running any of them raises NameError. The intent of each method is noted
    below; the tests also require Hub network access.
    """

    def lowerCamelCase__( self :Optional[Any] ) -> Dict:
        # setUp in the original: the computed value is discarded here.
        a__ = 0

    def lowerCamelCase__( self :Union[str, Any] ) -> List[str]:
        # Intended: load a feature extractor from the Hub by model id.
        a__ = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
        self.assertIsInstance(__snake_case ,__snake_case )  # NOTE(review): __snake_case undefined

    def lowerCamelCase__( self :Dict ) -> Tuple:
        # Intended: load from a local fixture directory.
        a__ = AutoFeatureExtractor.from_pretrained(__snake_case )
        self.assertIsInstance(__snake_case ,__snake_case )

    def lowerCamelCase__( self :Optional[int] ) -> Any:
        # Intended: config.json alone should be enough to load the processor locally.
        with tempfile.TemporaryDirectory() as tmpdirname:
            a__ = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            a__ = AutoFeatureExtractor.from_pretrained(__snake_case ).to_dict()
            config_dict.pop('feature_extractor_type' )  # NOTE(review): ``config_dict`` never bound (collapsed to a__)
            a__ = WavaVecaFeatureExtractor(**__snake_case )
            # save in new folder
            model_config.save_pretrained(__snake_case )  # NOTE(review): ``model_config``/``config`` never bound
            config.save_pretrained(__snake_case )
            a__ = AutoFeatureExtractor.from_pretrained(__snake_case )
            # make sure private variable is not incorrectly saved
            a__ = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )
            self.assertIsInstance(__snake_case ,__snake_case )

    def lowerCamelCase__( self :Tuple ) -> List[str]:
        # Intended: load from an explicit preprocessor config json path.
        a__ = AutoFeatureExtractor.from_pretrained(__snake_case )
        self.assertIsInstance(__snake_case ,__snake_case )

    def lowerCamelCase__( self :Union[str, Any] ) -> Any:
        # Intended: a bad model identifier must raise a clear error.
        with self.assertRaisesRegex(
            __snake_case ,'bert-base is not a local folder and is not a valid model identifier' ):
            a__ = AutoFeatureExtractor.from_pretrained('bert-base' )

    def lowerCamelCase__( self :str ) -> Union[str, Any]:
        # Intended: an invalid git revision must raise a clear error.
        with self.assertRaisesRegex(
            __snake_case ,R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            a__ = AutoFeatureExtractor.from_pretrained(__snake_case ,revision='aaaaaa' )

    def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
        # Intended: a repo without preprocessor_config.json must raise.
        with self.assertRaisesRegex(
            __snake_case ,'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' ,):
            a__ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )

    def lowerCamelCase__( self :int ) -> Optional[int]:
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(__snake_case ):
            a__ = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(__snake_case ):
            a__ = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=__snake_case )
        a__ = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=__snake_case )
        self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )  # NOTE(review): ``feature_extractor`` never bound
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(__snake_case )
            a__ = AutoFeatureExtractor.from_pretrained(__snake_case ,trust_remote_code=__snake_case )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )

    def lowerCamelCase__( self :str ) -> List[Any]:
        # Intended: register a custom config/feature-extractor pair with the auto classes.
        try:
            AutoConfig.register('custom' ,__snake_case )
            AutoFeatureExtractor.register(__snake_case ,__snake_case )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(__snake_case ):
                AutoFeatureExtractor.register(__snake_case ,__snake_case )
            # Now that the config is registered, it can be used as any other config with the auto-API
            a__ = CustomFeatureExtractor.from_pretrained(__snake_case )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(__snake_case )
                a__ = AutoFeatureExtractor.from_pretrained(__snake_case )
                self.assertIsInstance(__snake_case ,__snake_case )
        finally:
            # Always de-register so later tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def lowerCamelCase__( self :List[Any] ) -> Optional[Any]:
        # Intended: a locally registered extractor wins unless trust_remote_code=True.
        class snake_case_ (lowerCamelCase_ ):
            # NOTE(review): base class ``lowerCamelCase_`` is undefined here;
            # upstream subclassed the custom feature extractor.
            UpperCAmelCase__ : int = True  # marker attribute identifying the local implementation

        try:
            AutoConfig.register('custom' ,__snake_case )
            AutoFeatureExtractor.register(__snake_case ,__snake_case )
            # If remote code is not set, the default is to use local
            a__ = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
            self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            a__ = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=__snake_case )
            self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            a__ = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' ,trust_remote_code=__snake_case )
            self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
            self.assertTrue(not hasattr(__snake_case ,'is_local' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 335
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
snake_case : str = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
snake_case : str = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
snake_case : Union[str, Any] = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __lowercase ( preds , labels ):
    """Return the fraction of positions where ``preds`` equals ``labels``.

    Expects array-likes whose elementwise ``==`` comparison has a ``.mean()``
    method (numpy arrays, as produced by the metric's ``format='numpy'``).

    NOTE(review): the original signature declared the same parameter name
    twice — a SyntaxError in Python — and the body read the undefined names
    ``preds``/``labels``; the parameters are restored to those names.
    """
    return float((preds == labels).mean() )
def __lowercase ( preds , labels ):
    """Return accuracy and F1 for prediction/label arrays.

    NOTE(review): the original signature declared the same parameter name
    twice (a SyntaxError), and the body called a ``simple_accuracy`` helper
    whose name no longer exists in this file — the accuracy is inlined here.
    """
    # Accuracy: fraction of matching positions (numpy elementwise comparison).
    acc = float((preds == labels).mean())
    # F1 via scikit-learn, imported at the top of this file as ``fa_score``.
    fa = float(fa_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def __lowercase ( preds , labels ):
    """Return Pearson and Spearman correlations between predictions and references.

    NOTE(review): the original signature declared the same parameter name
    twice — a SyntaxError in Python — and the locals were collapsed onto one
    name; restored to (preds, labels) with distinct result bindings.
    """
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
    """GLUE evaluation metric (one metric per GLUE subset).

    NOTE(review): mangled file — the decorator reads ``_DESCRIPTION`` and
    ``_KWARGS_DESCRIPTION`` but the module constants above were all bound to
    ``snake_case``; both methods share the name ``lowerCamelCase__`` so the
    second shadows the first; and the compute method dispatches to
    ``pearson_and_spearman``/``acc_and_fa``/``simple_accuracy``, names that no
    longer exist in this file (the helpers were renamed to ``__lowercase``).
    """

    def lowerCamelCase__( self :str ) -> Any:
        """Declare the metric's metadata (intended: datasets.Metric._info)."""
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
        # stsb is a regression task, so its inputs are float32 instead of int64.
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
                    'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
                } ) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' ,)

    def lowerCamelCase__( self :List[Any] ,__snake_case :str ,__snake_case :List[str] ) -> Optional[any]:
        """Compute the metric for this GLUE subset (intended: _compute(predictions, references)).

        NOTE(review): the duplicate ``__snake_case`` parameter name is a
        SyntaxError, and the helper functions dispatched to below do not
        exist under these names in this file.
        """
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(__snake_case ,__snake_case )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(__snake_case ,__snake_case )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(__snake_case ,__snake_case )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(__snake_case ,__snake_case )}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 335
| 1
|
"""simple docstring"""
def a__ ( snake_case__ ) -> list[list]:
    """One step of row reduction on an augmented matrix (intended name: ``simplify``).

    NOTE(review): this body is corrupted beyond running. ``current_set``,
    ``magnitude``, ``first_row``, ``final_set``, ``temp_row``,
    ``current_first_column``, ``next_iteration`` and ``resultant`` are all
    read, but every assignment that should have created them was collapsed
    into a plain rebinding of ``lowerCamelCase`` — the first call raises
    NameError. It is also shadowed by the second ``def a__`` below, so as
    written it is dead code. The surviving structure shows the intent:
    normalise each row by its leading coefficient, subtract the first row to
    cancel the leading term, then recurse on the reduced sub-matrix.
    """
    lowerCamelCase = current_set.copy()  # NOTE(review): ``current_set`` undefined; the parameter is ``snake_case__``
    for row_index, row in enumerate(snake_case__ ):
        lowerCamelCase = row[0]
        for column_index, column in enumerate(snake_case__ ):
            if magnitude == 0:  # NOTE(review): ``magnitude`` is never bound
                lowerCamelCase = column
                continue
            lowerCamelCase = column / magnitude
    # Subtract to cancel term
    lowerCamelCase = current_set[0]
    lowerCamelCase = [first_row]
    lowerCamelCase = current_set[1::]
    for row in current_set:
        lowerCamelCase = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(snake_case__ )
            continue
        for column_index in range(len(snake_case__ ) ):
            temp_row.append(first_row[column_index] - row[column_index] )
        final_set.append(snake_case__ )
    # Create next recursion iteration set
    if len(final_set[0] ) != 3:
        lowerCamelCase = final_set[0]
        lowerCamelCase = []
        lowerCamelCase = []
        for row in final_set[1::]:
            current_first_column.append(row[0] )
            next_iteration.append(row[1::] )
        lowerCamelCase = simplify(snake_case__ )  # NOTE(review): ``simplify`` no longer exists under that name
        for i in range(len(snake_case__ ) ):
            resultant[i].insert(0 , current_first_column[i] )
        resultant.insert(0 , snake_case__ )
        lowerCamelCase = resultant
    return final_set
def a__ ( snake_case__ ) -> list:
    """Solve a system of n simultaneous linear equations in n unknowns.

    ``snake_case__`` is a list of n rows, each of length n + 1: the first n
    entries are the coefficients and the last entry is the constant term.
    Returns the solutions (rounded to 5 decimal places) in variable order.

    The previous body was corrupted — every working variable had been
    collapsed onto one name, so the first call raised NameError — and its
    algorithm rejected any solvable system whose rows all contained a zero.
    This implementation is plain Gaussian elimination with partial pivoting,
    which handles every uniquely-solvable system; the validation behaviour
    and error messages are preserved.

    Raises:
        IndexError: if the input is empty or any row has the wrong length.
        ValueError: if a row contains a non-numeric entry, or the system has
            no unique solution.

    >>> a__([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    """
    if len(snake_case__ ) == 0:
        raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
    _length = len(snake_case__ ) + 1
    if any(len(row ) != _length for row in snake_case__ ):
        raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
    for row in snake_case__:
        if any(not isinstance(column , (int, float) ) for column in row ):
            raise ValueError("""solve_simultaneous() requires lists of integers""" )

    size = len(snake_case__ )
    # Work on a float copy so the caller's rows are never mutated.
    augmented = [[float(column ) for column in row] for row in snake_case__]

    # Forward elimination with partial pivoting.
    for col in range(size ):
        # Bring the row with the largest remaining coefficient to the pivot
        # position — improves numerical stability and skips zero pivots.
        pivot_row = max(range(col , size ) , key=lambda r: abs(augmented[r][col] ) )
        if augmented[pivot_row][col] == 0:
            # No usable pivot: the system is singular / underdetermined.
            raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
        augmented[col], augmented[pivot_row] = augmented[pivot_row], augmented[col]
        for lower in range(col + 1 , size ):
            factor = augmented[lower][col] / augmented[col][col]
            for k in range(col , size + 1 ):
                augmented[lower][k] -= factor * augmented[col][k]

    # Back substitution, from the last pivot upwards.
    solutions = [0.0] * size
    for row_index in range(size - 1 , -1 , -1 ):
        known = sum(
            augmented[row_index][k] * solutions[k] for k in range(row_index + 1 , size )
        )
        solutions[row_index] = (augmented[row_index][size] - known ) / augmented[row_index][row_index]

    return [float(round(value , 5 ) ) for value in solutions]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the solver above is named a__ in this file (upstream:
    # solve_simultaneous); the original call sites referenced the undefined
    # names ``solve_simultaneous`` and ``eq``.
    lowerCAmelCase = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(a__(lowerCAmelCase))
    print(a__([[4, 2]]))
| 533
|
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def a__ ( snake_case__ ) -> bool:
    """Return True if the graph is bipartite, False otherwise.

    ``snake_case__`` is an adjacency list keyed by consecutive integers
    0..n-1. The check is a depth-first 2-coloring: color each component,
    then verify that no edge joins two vertices of the same color.

    The previous nested ``dfs`` declared two parameters with the same name
    (a SyntaxError) and recursed on the graph instead of the neighbour;
    both defects are fixed here.
    """
    visited = [False] * len(snake_case__ )
    color = [-1] * len(snake_case__ )

    def dfs(vertex , current_color ):
        # Color this vertex, then recurse into uncolored neighbours with the
        # opposite color.
        visited[vertex] = True
        color[vertex] = current_color
        for neighbour in snake_case__[vertex]:
            if not visited[neighbour]:
                dfs(neighbour , 1 - current_color )

    # Color every connected component.
    for vertex in range(len(snake_case__ ) ):
        if not visited[vertex]:
            dfs(vertex , 0 )

    # Bipartite iff no edge connects two same-colored vertices.
    for vertex in range(len(snake_case__ ) ):
        for neighbour in snake_case__[vertex]:
            if color[vertex] == color[neighbour]:
                return False
    return True
# Adjacency list of graph
lowerCAmelCase = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
# NOTE(review): the checker above is named a__ in this file (upstream:
# check_bipartite_dfs); the original call referenced the undefined names
# ``check_bipartite_dfs`` and ``graph``.
print(a__(lowerCAmelCase))
| 533
| 1
|
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
SCREAMING_SNAKE_CASE_ = '''src/transformers'''
SCREAMING_SNAKE_CASE_ = '''docs/source/en/tasks'''
def lowercase__ ( filename: str , start_prompt: str , end_prompt: str ):
    """Extract the text between two marker lines in a file.

    Returns a 4-tuple ``(text, start_index, end_index, lines)`` where
    ``lines`` is the whole file as a list of lines and the indices delimit
    the extracted region — the first line after the line starting with
    ``start_prompt`` up to (exclusive) the line starting with ``end_prompt``,
    with blank lines at either edge trimmed off.

    The previous signature declared all three parameters with the same name
    (a SyntaxError in Python); they are restored to the keyword names the
    call sites in this file use (filename, start_prompt, end_prompt).
    """
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start prompt, then step past it.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    # Walk forward to the end prompt, then step back before it.
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    # Trim blank lines (at most a newline, i.e. len <= 1) at both edges.
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE_ = direct_transformers_import(TRANSFORMERS_PATH)
SCREAMING_SNAKE_CASE_ = {
'''asr.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'''audio_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'''language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'''image_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'''masked_language_modeling.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'''multiple_choice.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'''object_detection.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'''question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'''semantic_segmentation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'''sequence_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'''summarization.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''token_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'''translation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'''video_classification.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'''document_question_answering.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'''monocular_depth_estimation.md''': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SCREAMING_SNAKE_CASE_ = {
'''summarization.md''': ('''nllb''',),
'''translation.md''': ('''nllb''',),
}
def lowercase__ ( lowerCAmelCase : int ) -> Any:
    """Render the markdown list of models that support a task guide.

    NOTE(review): mangled — ``task_guide``, ``TASK_GUIDE_TO_MODELS``,
    ``SPECIAL_TASK_GUIDE_TO_MODEL_TYPES``, ``model_maping_names``,
    ``special_model_types`` and ``model_names`` are all undefined here: the
    module constants above were collapsed onto ``SCREAMING_SNAKE_CASE_`` and
    the local bindings onto ``UpperCAmelCase``. The surviving structure shows
    the intent: look up the task's model-mapping names, merge the special
    model types, then format "[name](../model_doc/code)" links.
    """
    UpperCAmelCase = TASK_GUIDE_TO_MODELS[task_guide]
    UpperCAmelCase = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(lowerCAmelCase , set() )
    UpperCAmelCase = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def lowercase__ ( lowerCAmelCase : str , lowerCAmelCase : Dict=False ) -> Optional[int]:
    """Check (and optionally rewrite) the auto-generated model list in a task guide.

    NOTE(review): mangled — the signature declares ``lowerCAmelCase`` twice,
    which is a SyntaxError in Python, and the body calls
    ``_find_text_in_file``/``get_model_list_for_task`` and reads
    ``task_guide``/``overwrite``/``current_list``/``new_list``/``lines``/
    ``start_index``/``end_index`` — none of which exist under those names in
    this file. Upstream signature: check_model_list_for_task(task_guide,
    overwrite=False).
    """
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = _find_text_in_file(
        filename=os.path.join(lowerCAmelCase , lowerCAmelCase ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
    UpperCAmelCase = get_model_list_for_task(lowerCAmelCase )

    if current_list != new_list:
        if overwrite:
            # Splice the regenerated list back between the marker lines.
            with open(os.path.join(lowerCAmelCase , lowerCAmelCase ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                F"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                ' to fix this.' )
if __name__ == "__main__":
    # NOTE(review): mangled — the parser, the parsed args and the task-guide
    # table were all collapsed onto the single name ``SCREAMING_SNAKE_CASE_``,
    # so ``parser``, ``args``, ``TASK_GUIDE_TO_MODELS`` and
    # ``check_model_list_for_task`` below are undefined at runtime.
    SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    SCREAMING_SNAKE_CASE_ = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 373
|
"""simple docstring"""
import math
def lowercase__ ( x: float , y: float ) -> float:
    """Return a value that orders x**y by magnitude without computing the power.

    For x, y both nonzero this is ``y * log10(x)`` — i.e. ``log10(x ** y)`` —
    so comparing the returned values compares the powers (x must be > 0 for
    the logarithm). Special cases: ``0 ** y`` -> 0 and ``x ** 0`` -> 1.

    The previous signature declared the same parameter name twice (a
    SyntaxError) and called ``math.logaa``, which does not exist; both are
    fixed here. The trailing ``raise`` is an unreachable safety guard, since
    the two branches exhaust the ``0 in (x, y)`` case.
    """
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x )
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('This should never happen' )
if __name__ == "__main__": # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    # NOTE(review): mangled — every variable (prompt, x1/y1, x2/y2, res1/res2)
    # was collapsed onto ``SCREAMING_SNAKE_CASE_``, so ``prompt``, ``res``,
    # ``xa``, ``ya`` and ``resa`` below are undefined, the tuple-unpack
    # targets are duplicated, and ``res`` does not match the function name
    # above (``lowercase__``). Upstream compared x1**y1 with x2**y2 via logs.
    SCREAMING_SNAKE_CASE_ = '''Enter the base and the power separated by a comma: '''
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = map(int, input(prompt).split(''','''))
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = map(int, input(prompt).split(''','''))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    SCREAMING_SNAKE_CASE_ = res(xa, ya)
    SCREAMING_SNAKE_CASE_ = res(xa, ya)

    # We check for the largest number
    if resa > resa:
        print('''Largest number is''', xa, '''^''', ya)
    elif resa > resa:
        print('''Largest number is''', xa, '''^''', ya)
    else:
        print('''Both are equal''')
| 373
| 1
|
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
_SCREAMING_SNAKE_CASE = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
_SCREAMING_SNAKE_CASE = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def _lowerCAmelCase ( lowerCamelCase_ : list[list[int]] ) -> list[list[int]]:
    """Compute the next Conway's Game of Life generation for a 0/1 grid.

    ``lowerCamelCase_`` is a rectangular 2-D list of 0 (dead) / 1 (alive)
    cells; returns a new grid of the same shape. Rules (from the upstream
    comments): a live cell with two or three live neighbours survives; a dead
    cell with exactly three live neighbours becomes alive; everything else is
    dead in the next generation.

    The previous body read the undefined name ``cells`` (the parameter had
    been renamed without updating its uses) and appended the parameter itself
    instead of the row being built; both defects are fixed here.
    """
    next_generation = []
    for i in range(len(lowerCamelCase_ ) ):
        next_generation_row = []
        for j in range(len(lowerCamelCase_[i] ) ):
            # Count the live neighbours of cell (i, j), clipping at the
            # borders of the grid.
            neighbour_count = 0
            for di in (-1, 0, 1):
                for dj in (-1, 0, 1):
                    if di == 0 and dj == 0:
                        continue  # the cell itself is not its own neighbour
                    ni, nj = i + di, j + dj
                    if 0 <= ni < len(lowerCamelCase_ ) and 0 <= nj < len(lowerCamelCase_[i] ):
                        neighbour_count += lowerCamelCase_[ni][nj]
            # Survival: live with 2-3 neighbours; birth: dead with exactly 3.
            alive = lowerCamelCase_[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
def _lowerCAmelCase ( lowerCamelCase_ : list[list[int]] , lowerCamelCase_ : int ):
    """Render successive Game of Life generations as PIL images.

    NOTE(review): mangled — the signature declares ``lowerCamelCase_`` twice
    (a SyntaxError in Python); ``cells``, ``img``, ``colour``, ``images`` and
    ``new_generation`` are undefined because their bindings were collapsed
    onto ``__lowercase``; and the per-pixel store was turned into a plain
    assignment. Upstream intent: generate_images(cells, frames) — for each
    frame, draw the grid into a new greyscale RGB image (live -> black,
    dead -> white), append it, then advance ``cells`` with new_generation().
    """
    __lowercase = []
    for _ in range(lowerCamelCase_ ):
        # Create output image
        __lowercase = Image.new('''RGB''' , (len(cells[0] ), len(lowerCamelCase_ )) )
        __lowercase = img.load()
        # Save cells to image
        for x in range(len(lowerCamelCase_ ) ):
            for y in range(len(cells[0] ) ):
                __lowercase = 2_5_5 - cells[y][x] * 2_5_5
                __lowercase = (colour, colour, colour)
        # Save image
        images.append(lowerCamelCase_ )
        __lowercase = new_generation(lowerCamelCase_ )
    return images
if __name__ == "__main__":
    # NOTE(review): ``generate_images``, ``GLIDER`` and ``images`` are
    # undefined here — the functions above are both named _lowerCAmelCase and
    # the example grids were collapsed onto _SCREAMING_SNAKE_CASE (the
    # blinker overwrote the glider).
    _SCREAMING_SNAKE_CASE = generate_images(GLIDER, 1_6)
    images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 56
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 56
| 1
|
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCAmelCase ( unittest.TestCase ):
    """Tests for transformers.utils.backbone_utils helpers.

    NOTE(review): mangled file — all three test methods share the name
    ``_a`` (the later defs shadow the earlier ones) and none matches
    unittest's ``test_*`` discovery pattern, so nothing runs. The arguments
    passed as ``a_`` are never defined (NameError at runtime), and the
    ``backbone`` object built in the last method is never actually bound —
    every assignment was collapsed onto ``_UpperCAmelCase``. The intent of
    each method is noted below.
    """

    def _a ( self ) -> Optional[Any]:
        # Intended: get_aligned_output_features_output_indices alignment rules.
        _UpperCAmelCase = ["a", "b", "c"]
        # Defaults to last layer if both are None
        _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , a_ , a_ )
        self.assertEqual(a_ , ["c"] )
        self.assertEqual(a_ , [2] )
        # Out indices set to match out features
        _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(["a", "c"] , a_ , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [0, 2] )
        # Out features set to match out indices
        _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [0, 2] , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [0, 2] )
        # Out features selected from negative indices
        _UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(a_ , [-3, -1] , a_ )
        self.assertEqual(a_ , ["a", "c"] )
        self.assertEqual(a_ , [-3, -1] )

    def _a ( self ) -> Optional[int]:
        # Intended: verify_out_features_out_indices validation errors.
        # Stage names must be set
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , a_ )
        # Out features must be a list
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(("a", "b") , (0, 1) , ["a", "b"] )
        # Out features must be a subset of stage names
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 1) , ["a"] )
        # Out indices must be a list or tuple
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(a_ , 0 , ["a", "b"] )
        # Out indices must be a subset of stage names
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(a_ , (0, 1) , ["a"] )
        # Out features and out indices must be the same length
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0,) , ["a", "b", "c"] )
        # Out features should match out indices
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["a", "b"] , (0, 2) , ["a", "b", "c"] )
        # Out features and out indices should be in order
        with self.assertRaises(a_ ):
            verify_out_features_out_indices(["b", "a"] , (0, 1) , ["a", "b"] )
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"] , (0, 1, -1) , ["a", "b", "c", "d"] )

    def _a ( self ) -> int:
        # Intended: BackboneMixin keeps out_features/out_indices in sync.
        _UpperCAmelCase = BackboneMixin()
        _UpperCAmelCase = ["a", "b", "c"]
        _UpperCAmelCase = ["a", "c"]
        _UpperCAmelCase = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [0, 2] )
        # Check out features and indices are updated correctly
        _UpperCAmelCase = ["a", "b"]
        self.assertEqual(backbone.out_features , ["a", "b"] )
        self.assertEqual(backbone.out_indices , [0, 1] )
        _UpperCAmelCase = [-3, -1]
        self.assertEqual(backbone.out_features , ["a", "c"] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
| 657
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __lowerCamelCase ( UpperCamelCase__=None ):
    """Build the argument parser for the ``accelerate test`` command.

    UpperCamelCase__: optional argparse subparsers collection; when given,
    the parser is registered as the ``test`` subcommand, otherwise a
    standalone parser is created.

    NOTE(review): mangled — the body tests ``subparsers`` and mutates
    ``parser``, but the parameter is named ``UpperCamelCase__`` and the
    parser object is bound to ``_UpperCAmelCase``, so both names below are
    undefined. ``default=UpperCamelCase__`` and ``func=UpperCamelCase__``
    also look wrong: upstream used ``default=None`` and set ``func`` to the
    test-command handler, whose name was lost in this file.
    """
    if subparsers is not None:
        _UpperCAmelCase = subparsers.add_parser("test" )
    else:
        _UpperCAmelCase = argparse.ArgumentParser("Accelerate test command" )

    parser.add_argument(
        "--config_file" , default=UpperCamelCase__ , help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) , )

    if subparsers is not None:
        parser.set_defaults(func=UpperCamelCase__ )
    return parser
def __lowerCamelCase ( UpperCamelCase__ ):
    """Run the accelerate end-to-end test script with the parsed CLI args.

    UpperCamelCase__: an argparse.Namespace carrying a ``config_file``
    attribute (see the parser builder above). Launches
    ``accelerate-launch .../test_utils/scripts/test_script.py`` via
    execute_subprocess_async (imported at the top of this file) and prints a
    success message when the subprocess exits with code 0.

    The previous body read the undefined names ``args``, ``script_name`` and
    ``test_args`` — every local had been collapsed onto ``_UpperCAmelCase``;
    the locals are restored here.
    """
    # Path to <repo>/test_utils/scripts/test_script.py, relative to this file.
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )

    if UpperCamelCase__.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={UpperCamelCase__.config_file} {script_name}"

    # NOTE(review): .split() breaks if the config path contains spaces —
    # inherited from the original string-building approach.
    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )

    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!" )
def __lowerCamelCase ( ):
    """Entry point: parse the CLI arguments and run the `accelerate test` command.

    NOTE(review): `test_command_parser`, `parser`, `test_command`,
    `UpperCamelCase__` and `main` (in the guard below) are all undefined in this
    module — the helper names were mangled to `__lowerCamelCase`, so calling
    this function raises NameError. Restore the original helper names to fix.
    """
    _UpperCAmelCase = test_command_parser()  # NOTE(review): undefined name; result is also discarded
    _UpperCAmelCase = parser.parse_args()  # NOTE(review): `parser` is never bound in this scope
    test_command(UpperCamelCase__ )  # NOTE(review): both names are undefined here
if __name__ == "__main__":
    main()  # NOTE(review): `main` is undefined in this module
| 657
| 1
|
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A: Union[str, Any] = tuple[int, int]
class SCREAMING_SNAKE_CASE__ :
    """Weighted undirected graph with a Prim's-algorithm minimum spanning tree.

    Edges are stored canonically as ``(min(u, v), max(u, v)) -> weight`` so each
    undirected edge appears exactly once.
    """

    def __init__(self, vertices, edges) -> None:
        """
        Args:
            vertices: set of vertex ids.
            edges: mapping of ``(u, v)`` pairs to integer weights.
        """
        self.vertices: set[int] = vertices
        # Canonicalize each edge key so (u, v) and (v, u) collapse to one entry.
        self.edges: dict[tuple[int, int], int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge, weight) -> None:
        """Add an edge (creating its endpoints if needed) with the given weight."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self):
        """Return the minimum spanning tree of this (connected) graph as a new graph."""
        # Start from an arbitrary (smallest-id) vertex.
        subgraph = SCREAMING_SNAKE_CASE__({min(self.vertices)}, {})
        while len(subgraph.vertices) < len(self.vertices):
            # Sentinel strictly greater than every edge weight.
            min_weight = max(self.edges.values()) + 1
            min_edge = None
            for edge, weight in self.edges.items():
                # XOR: exactly one endpoint already inside the growing tree.
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def _snake_case ( filename: str = "p107_network.txt" ):
    """Project Euler 107: maximum saving from replacing the network with its MST.

    Reads a comma-separated adjacency matrix ('-' marks a missing edge), builds
    the weighted graph, runs Prim's algorithm, and returns the total weight
    removed (initial total weight minus minimum-spanning-tree weight).
    """
    # BUG FIX: every local was bound to `UpperCAmelCase` while later lines read
    # `edges`, `data`, `graph`, etc. — all NameErrors. The graph class in this
    # module is named `SCREAMING_SNAKE_CASE__`, not `Graph`.
    # NOTE(review): resolves the data file next to this script — confirm intent.
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    network_file = os.path.join(script_dir , filename )
    edges: dict[tuple[int, int], int] = {}

    with open(network_file ) as f:
        data = f.read().strip().split("\n" )
    adjacency_matrix = [line.split("," ) for line in data]

    # Only the strict lower triangle is needed: the matrix is symmetric.
    for edge_b in range(1 , len(adjacency_matrix ) ):
        for edge_a in range(edge_b ):
            if adjacency_matrix[edge_b][edge_a] != "-":
                edges[(edge_a, edge_b)] = int(adjacency_matrix[edge_b][edge_a] )

    graph = SCREAMING_SNAKE_CASE__(set(range(len(adjacency_matrix ) ) ) , edges )
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values() )
    optimal_total = sum(subgraph.edges.values() )
    return initial_total - optimal_total
if __name__ == "__main__":
    # BUG FIX: `solution` does not exist in this module — the solver above is
    # named `_snake_case` — so the guard raised NameError. Call the local name.
    print(f"""{_snake_case() = }""")
| 359
|
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE__(ModelMixin, ConfigMixin):
    """T5-style FiLM-conditioned spectrogram decoder.

    A stack of `DecoderLayer`s that denoises continuous spectrogram frames,
    conditioned on a diffusion timestep embedding (via FiLM) and on encoder
    outputs (via cross-attention).

    BUG FIX: the original had undefined base classes (`UpperCAmelCase__`),
    duplicate parameter names (a SyntaxError), methods not named per the
    nn.Module call protocol, and every local/attribute bound to a mangled name.
    """

    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ) -> None:
        super().__init__()

        # Projects the timestep embedding up to the 4*d_model FiLM conditioning vector.
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        # Positions act as a fixed lookup table; they are not trained.
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input: torch.Tensor, key_input: torch.Tensor) -> torch.Tensor:
        """Outer product of two 0/1 masks, with a broadcastable heads axis inserted."""
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        """Denoise `decoder_input_tokens` conditioned on time and encoder outputs.

        Args:
            encodings_and_masks: iterable of ``(encoding, mask)`` pairs.
            decoder_input_tokens: (batch, seq, input_dims) continuous inputs.
            decoder_noise_time: (batch,) diffusion times in [0, 1).
        """
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [
            (x, self.encoder_decoder_mask(decoder_mask, y_mask)) for x, y_mask in encodings_and_masks
        ]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    """One FiLM-conditioned T5 decoder block: cond self-attn, cross-attn, FiLM FF.

    BUG FIX: the class was named `SCREAMING_SNAKE_CASE__` (shadowed by every
    sibling class) while the decoder references `DecoderLayer`; parameters were
    all duplicated (a SyntaxError); the forward method was misnamed.
    """

    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)

        if encoder_hidden_states is not None:
            # 0/1 mask -> additive mask (0 where attended, large negative elsewhere).
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    """Pre-norm self-attention with optional FiLM conditioning and a residual add.

    BUG FIX: renamed from the shadowed `SCREAMING_SNAKE_CASE__` to the name the
    sibling `DecoderLayer` references; duplicate parameter names (a SyntaxError)
    and the misnamed forward method are fixed.
    """

    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        # FiLM input is the 4*d_model conditioning vector produced by the decoder.
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        # NOTE(review): out_bias/scale_qk values follow the upstream diffusers
        # implementation — confirm against the original config.
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    """Pre-norm cross-attention over encoder key/value states with a residual add.

    BUG FIX: renamed from the shadowed `SCREAMING_SNAKE_CASE__` to the name the
    sibling `DecoderLayer` references; duplicate parameter names (a SyntaxError)
    and the misnamed forward method are fixed.
    """

    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        # NOTE(review): out_bias/scale_qk follow upstream diffusers — confirm.
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            # Drop the broadcastable heads axis added by encoder_decoder_mask.
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    """Pre-norm gated feed-forward with optional FiLM conditioning and residual add.

    BUG FIX: renamed from the shadowed `SCREAMING_SNAKE_CASE__` to the name the
    sibling `DecoderLayer` references; duplicate parameter names (a SyntaxError)
    and the misnamed forward method are fixed.
    """

    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        # FiLM input is the 4*d_model conditioning vector produced by the decoder.
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    """T5-style gated GELU MLP: out = wo(dropout(gelu(wi_0(x)) * wi_1(x))).

    BUG FIX: renamed from the shadowed `SCREAMING_SNAKE_CASE__` to the name the
    sibling `TaLayerFFCond` references. The original forward called `self.wi_a`
    for BOTH projections even though two distinct Linear layers were
    constructed; the two input projections are now distinct (`wi_0`, `wi_1`).
    """

    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        # Gated activation: elementwise product of the two projections.
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    """T5-style RMS layer norm: no mean subtraction, no bias, scale only.

    BUG FIX: renamed from the shadowed `SCREAMING_SNAKE_CASE__` to the name the
    sibling modules reference; duplicate parameter names (a SyntaxError) and the
    misnamed forward method are fixed.
    """

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # Accumulate the variance in fp32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    """Tanh approximation of GELU (the "gelu_new" variant used by GPT-2/T5).

    BUG FIX: renamed from the shadowed `SCREAMING_SNAKE_CASE__` to the name
    `TaDenseGatedActDense` references; the forward method was misnamed.
    """

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """Feature-wise linear modulation: x * (1 + scale) + shift.

    Scale and shift are both predicted from the conditioning embedding by a
    single bias-free linear layer producing ``2 * out_features`` values.

    BUG FIX: renamed from the shadowed `SCREAMING_SNAKE_CASE__` to the name the
    sibling modules reference; duplicate parameter names (a SyntaxError) and the
    misnamed forward method are fixed.
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 359
| 1
|
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
__A = TypeVar('''T''')
class _snake_case ( Generic[T] ):
    """Graph stored as an adjacency list (a Python dictionary of lists).

    Supports both directed and undirected graphs; vertices are created lazily
    when an edge touching them is first added.

    BUG FIX: the edge-adding method had two parameters with the same mangled
    name (a SyntaxError), and several adjacency-list assignments were bound to
    a throwaway local instead of ``self.adj_list[...]``, so new vertices were
    silently never created.
    """

    def __init__(self, directed: bool = True):
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def lowerCamelCase__(self, source_vertex: T, destination_vertex: T):
        """Add an edge from source to destination (both ways if undirected).

        Returns self so calls can be chained.
        """
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if neither vertex is present, create both entries pointing at each other.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present, append the destination and create an
            # empty entry for it.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present, create the source entry pointing
            # at the destination.
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if neither vertex is present, create the source entry and an empty
            # destination entry.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self):
        return pformat(self.adj_list)
| 646
|
"""simple docstring"""
from manim import *
class _snake_case ( a__ ):
    # NOTE(review): the base class `a__` is undefined in this module; given the
    # `from manim import *` above, this was presumably a manim `Scene` subclass
    # before the symbols were mangled — confirm and restore.
    def lowerCamelCase__ ( self : str ):
        """Animate a CPU/GPU/Model memory diagram.

        NOTE(review): every local below is bound to the same mangled name
        `__lowerCamelCase`, while later lines read the intended names (`mem`,
        `cpu`, `gpu`, `model`, `key`, `key_text`, `step_a`, `cpu_left_col_base`,
        `cpu_targs`, `first_animations`, `second_animations`, `cpu_target`) and
        the manim direction constants were replaced by the undefined name
        `UpperCAmelCase` — running this raises NameError. Restore the original
        bindings before use.
        """
        __lowerCamelCase : Tuple = Rectangle(height=0.5 , width=0.5 )
        __lowerCamelCase : Dict = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
        __lowerCamelCase : str = [mem.copy() for i in range(6 )]
        __lowerCamelCase : str = [mem.copy() for i in range(6 )]
        __lowerCamelCase : Union[str, Any] = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        __lowerCamelCase : List[str] = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        __lowerCamelCase : Dict = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        __lowerCamelCase : str = Text("CPU" , font_size=24 )
        __lowerCamelCase : List[Any] = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(UpperCAmelCase )
        __lowerCamelCase : Tuple = [mem.copy() for i in range(1 )]
        __lowerCamelCase : List[str] = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        __lowerCamelCase : Optional[Any] = Text("GPU" , font_size=24 )
        __lowerCamelCase : Any = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
        gpu.align_to(UpperCAmelCase , UpperCAmelCase )
        gpu.set_x(gpu.get_x() - 1 )
        self.add(UpperCAmelCase )
        __lowerCamelCase : List[Any] = [mem.copy() for i in range(6 )]
        __lowerCamelCase : Optional[int] = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        __lowerCamelCase : List[str] = Text("Model" , font_size=24 )
        __lowerCamelCase : Tuple = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.play(
            Create(UpperCAmelCase , run_time=1 ) , Create(UpperCAmelCase , run_time=1 ) , Create(UpperCAmelCase , run_time=1 ) , )
        __lowerCamelCase : int = MarkupText(
            F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
        __lowerCamelCase : Dict = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        __lowerCamelCase : str = MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        step_a.move_to([2, 2, 0] )
        self.play(Write(UpperCAmelCase , run_time=2.5 ) , Write(UpperCAmelCase ) , Write(UpperCAmelCase ) )
        self.add(UpperCAmelCase )
        __lowerCamelCase : Any = []
        __lowerCamelCase : int = []
        __lowerCamelCase : Optional[Any] = []
        # Move one translucent rectangle per CPU slot into position, animating
        # the stroke change and the move together afterwards.
        for i, rect in enumerate(UpperCAmelCase ):
            __lowerCamelCase : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase , opacity=0.7 )
            cpu_target.move_to(UpperCAmelCase )
            cpu_target.generate_target()
            __lowerCamelCase : Optional[Any] = 0.4_6 / 4
            __lowerCamelCase : Dict = 0.4_6 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=UpperCAmelCase )
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCAmelCase , buff=0.0 )
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCAmelCase , buff=0.0 )
            cpu_targs.append(UpperCAmelCase )
            first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCAmelCase ) )
            second_animations.append(MoveToTarget(UpperCAmelCase , run_time=1.5 ) )
        self.play(*UpperCAmelCase )
        self.play(*UpperCAmelCase )
        self.wait()
| 646
| 1
|
'''simple docstring'''
class __lowercase :
    """Max Fenwick (binary indexed) tree: point update and range-max query.

    Both operations run in O(log^2 n). Values are assumed non-negative (the
    query accumulator starts at 0).

    BUG FIX: methods had duplicate parameter names (a SyntaxError), all four
    methods shared one mangled name (shadowing each other), locals were bound to
    a throwaway name, and the non-leaf refresh compared indices against values
    (`max(value, current_left_border, index)`); the covered range's maximum is
    now recomputed via `query`.
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size   # raw values
        self.tree = [0] * size  # tree[i] = max(arr[get_prev(i) + 1 : i + 1])

    @staticmethod
    def get_next(index: int) -> int:
        """Next index whose tree node covers this one."""
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        """Index just before the left edge of the range this tree node covers."""
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set arr[index] = value and refresh every tree node covering it."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # Leaf-like node: covers only this index.
                self.tree[index] = value
            else:
                # Node covers [current_left_border, index]; recompute its maximum
                # from the (already up-to-date) lower nodes plus the new value.
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Return max(arr[left:right]) — `right` is exclusive."""
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                # Whole node range fits inside the query: take its cached max.
                result = max(result, self.tree[right])
                right = current_left
            else:
                # Partial overlap: fall back to the raw value.
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
    # Run docstring examples (if any) as a lightweight self-test when executed
    # directly; importing this module does not trigger them.
    import doctest
    doctest.testmod()
| 709
|
'''simple docstring'''
import numpy as np
import datasets
# BUG FIX: all three constants were assigned to the single name `UpperCAmelCase_`
# (each shadowing the previous), while the metric class below references
# `_DESCRIPTION`, `_CITATION` and `_KWARGS_DESCRIPTION` — NameError at import.
# The canonical names are restored; the string contents are unchanged.
_DESCRIPTION = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
_CITATION = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
_KWARGS_DESCRIPTION = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
    """Mahalanobis distance metric.

    BUG FIX: both methods shared one mangled name (shadowing each other) and had
    duplicate parameter names (a SyntaxError); locals were bound to a throwaway
    name while later lines read the intended names. The methods are restored as
    the `_info`/`_compute` hooks that `datasets.Metric` invokes.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'X': datasets.Sequence(datasets.Value('float' , id='sequence' ) , id='X' ),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        """Return the Mahalanobis distance of each row of X from the reference distribution."""
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError('Expected `X` to be a 2D vector')
        if len(reference_distribution.shape) != 2:
            raise ValueError('Expected `reference_distribution` to be a 2D vector')
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension')

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # Singular covariance: fall back to the pseudo-inverse.
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 490
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.