code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 103
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
# Worked example for the Banker's algorithm class below.
# BUG FIX: all three tables were bound to the same mangled name, so the first
# two assignments were silently overwritten; give each table its own name.

# Total units of each of the four resource types owned by the system.
test_claim_vector = [8, 5, 9, 7]

# Resources currently allocated to each of the five processes (one row per process).
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]

# Maximum resources each process may ever claim (one row per process).
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    """Deadlock avoidance via Dijkstra's Banker's algorithm.

    Built from three tables:
      * ``claim_vector``              -- total units of every resource the system owns
      * ``allocated_resources_table`` -- resources currently held, one row per process
      * ``maximum_claim_table``       -- maximum possible demand, one row per process

    ``main()`` repeatedly executes any process whose outstanding need fits in
    the currently available resources, releasing its allocation back to the
    pool, until every process has run (safe state) or none can (unsafe state).

    NOTE(review): the original block named every method ``_A`` (so only the
    last definition survived) and bound ``__init__`` values to a throwaway
    local instead of the ``self.__*`` attributes the other methods read.
    """

    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self):
        """Column-wise sum of the allocation table: units of each resource in use."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        """Units of each resource not currently allocated to any process."""
        return np.array(self.__claim_vector) - np.array(self.__processes_resource_summation())

    def __need(self):
        """Outstanding need per process: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        """Map each process's original index to its need vector, so a process can
        still be identified after entries are removed from the working list."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs):
        """Run the safety algorithm, printing each executed process.

        Any truthy keyword argument makes the data tables print first.
        """
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        # This process's remaining demand cannot be met right now.
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    process_number = -1
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join(str(x) for x in available_resources)
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Print the allocation and maximum-claim tables plus the summary vectors."""
        print(" " * 9 + "Allocated Resource Table")
        # enumerate() instead of list.index() so duplicate rows print correctly
        for row_number, item in enumerate(self.__allocated_resources_table):
            print(f"P{row_number + 1}" + " ".join(f"{it:>8}" for it in item) + "\n")
        print(" " * 9 + "System Resource Table")
        for row_number, item in enumerate(self.__maximum_claim_table):
            print(f"P{row_number + 1}" + " ".join(f"{it:>8}" for it in item) + "\n")
        print("Current Usage by Active Processes: " + " ".join(str(x) for x in self.__claim_vector))
        print("Initial Available Resources: " + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)
# Script entry point: run any doctests embedded in this module.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 72
| 0
|
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class A__(unittest.TestCase):
    """Tests for ``GradientAccumulator`` / ``create_optimizer`` (TF backend).

    NOTE(review): the original block gave every method the same mangled name
    with duplicate parameter names (a SyntaxError); the helper is restored as
    ``assertListAlmostEqual`` since the tests call it by that name.
    """

    def assertListAlmostEqual(self, list1, list2, tol):
        # Element-wise approximate comparison used by the tests below.
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        # Passing a different number of gradients than the first call must fail.
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        # Reset the eager context so the logical-device configuration can be set.
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            # Split the single CPU into two logical devices for MirroredStrategy.
            tf.config.set_logical_device_configuration(
                physical_devices[0],
                [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()],
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            # create_optimizer returns (optimizer, lr_schedule); only the
            # optimizer is needed here.
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])
        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
| 717
|
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    """A node of the segment tree covering the inclusive index range [start, end]."""

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """Segment tree over ``collection`` supporting point update and range query
    with an arbitrary associative combine ``function`` (e.g. add, max, min).

    NOTE(review): both classes were mangled to the same name ``A__`` even
    though the code itself references ``SegmentTreeNode`` and ``SegmentTree``;
    the proper names are restored.
    """

    def __init__(self, collection, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set element ``i`` to ``val`` and refresh every affected aggregate."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return the combined value over the inclusive index range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        # Recombine on the way back up.
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range entirely in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range spans left and right child trees
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range entirely in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield every node in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
    import operator

    # Demo: the same tree with three different combine functions.
    # BUG FIX: the original bound the tree to a mangled name but used ``arr``.
    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
| 549
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger.
# BUG FIX: the logger and the archive map were bound to the same mangled name,
# so the logger was immediately overwritten by the dict.
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    """Configuration for a PoolFormer model (defaults match ``sail/poolformer_s12``).

    NOTE(review): the original mangled every ``__init__`` parameter to the same
    name (a SyntaxError) and the base class to an undefined ``__snake_case``;
    the parameter names are restored from the attribute assignments and the
    base is the ``PretrainedConfig`` imported at the top of this file.
    List defaults mirror the upstream signature and are never mutated here.
    """

    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    """ONNX export configuration for PoolFormer.

    NOTE(review): renamed from a mangled name that shadowed the config class
    defined just above; the base is the ``OnnxConfig`` imported at the top of
    this file.
    """

    # Minimum torch version required for a correct ONNX export of this model.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        """Input name -> dynamic-axis mapping expected by the ONNX exporter."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model."""
        return 2e-3
| 581
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Tests for the framework-agnostic tensor helpers in ``transformers.utils``
    (flatten_dict / transpose / reshape / squeeze / expand_dims) against numpy,
    torch, tensorflow and flax inputs.

    NOTE(review): the original gave all 17 test methods the same mangled name
    (so only one was ever collected) and referenced an undefined ``_lowercase``
    local; distinct ``test_*`` names and real locals are restored.
    """

    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 581
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Markov chain stored as a directed graph of transition probabilities.

    NOTE(review): the class name is restored from the call site below, which
    references ``MarkovChainGraphUndirectedUnweighted``; the original mangled
    body bound a throwaway local instead of ``self.connections``.
    """

    def __init__(self):
        # node -> {destination node: transition probability}
        self.connections = {}

    def add_node(self, node):
        """Register ``node`` with no outgoing transitions yet."""
        self.connections[node] = {}

    def add_transition_probability(self, node1, node2, probability):
        """Add (or overwrite) the transition ``node1 -> node2``; unseen nodes
        are registered automatically."""
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self):
        """All known node labels, in insertion order."""
        return list(self.connections)

    def transition(self, node):
        """Sample the successor of ``node`` according to the stored
        probabilities; returns "" if the draw exceeds the total mass."""
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start, transitions, steps):
    """Run a random walk of ``steps`` transitions over the given Markov chain.

    ``transitions`` is an iterable of ``(node1, node2, probability)`` triples.
    Returns a Counter mapping each node to the number of times it was visited
    (every node starts at 1 via ``Counter(graph.get_nodes())``).
    """
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
# Script entry point: run any doctests embedded in this module.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 718
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        """Mirror of the Win32 CONSOLE_CURSOR_INFO structure.

        NOTE(review): the field list must be bound to the special ctypes name
        ``_fields_`` (the original bound a mangled name, leaving the struct
        without any fields), and the class is referenced as ``CursorInfo`` by
        the cursor functions below.
        """

        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def hide_cursor():
    """Hide the terminal cursor (Win32 console API on Windows, ANSI escape on POSIX)."""
    if os.name == "nt":
        ci = CursorInfo()
        # -11 is STD_OUTPUT_HANDLE; the original mangled ``kernel32`` to
        # ``kernelaa`` and overwrote the handle with the boolean before use.
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()
def show_cursor():
    """Show the terminal cursor (Win32 console API on Windows, ANSI escape on POSIX)."""
    if os.name == "nt":
        ci = CursorInfo()
        # -11 is STD_OUTPUT_HANDLE; see hide_cursor for the mangling fixed here.
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()
@contextmanager
def hidden_cursor():
    """Context manager that hides the terminal cursor for the duration of the block."""
    try:
        hide_cursor()
        yield
    finally:
        # Always restore the cursor, even if the body raises.
        show_cursor()
| 296
| 0
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 464
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    """Build the argparse parser for the ``accelerate env`` command.

    When ``subparsers`` is given (the top-level accelerate CLI), attach an
    ``env`` sub-command and bind ``env_command`` as its handler; otherwise
    build a standalone parser. Returns the parser either way.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")
    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )
    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    """Collect and print environment information for bug reports.

    Prints accelerate/platform/framework versions plus the current accelerate
    config (from ``args.config_file`` or the default location) and returns the
    collected ``info`` dict.
    """
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        # Store the device name in the report (the original dropped it into a
        # mangled local and never used it).
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config
    return info
def main():
    """CLI entry point: parse arguments and print the environment report."""
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
| 464
| 1
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCamelCase(unittest.TestCase):
    """Tests for ``BarkProcessor`` (tokenizer wrapping + speaker-embedding handling).

    NOTE(review): the original gave every method the same mangled name, so the
    ``setUp``/``tearDown`` unittest hooks never ran and only one test was
    collected; lifecycle and ``test_*`` names are restored, and the fixtures
    are bound to ``self`` instead of throwaway locals.
    """

    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        # NOTE(review): the original mangled these three flag values; the
        # restored values follow the upstream Bark processor test — confirm.
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 712
|
def sylvester(number: int) -> int:
    """Return the ``number``-th term of Sylvester's sequence (1-indexed).

    The sequence starts 2, 3, 7, 43, 1807, ... and satisfies
    a(n) = a(n-1)^2 - a(n-1) + 1.

    :raises ValueError: if ``number`` < 1
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        # (num - 1) * num + 1 == num^2 - num + 1, the Sylvester recurrence.
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 441
| 0
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    """Builds small TimeSformer configs/inputs for the model tests.

    Renamed from the obfuscated `_snake_case` because the test class below
    instantiates `TimesformerModelTester(self)`.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for one forward pass."""
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for TimeSformer (no attention-head pruning,
    torchscript, embedding resizing, or head masking support)."""

    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Copy inputs_dict, adding dummy labels for classification heads."""
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    """Download the test video (as a numpy array of frames) from the hub and
    return it as a list of frames."""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    """Slow integration test checking logits of the pretrained K400 model."""

    @cached_property
    def default_image_processor(self):
        # VideoMAE's processor is reused for TimeSformer preprocessing.
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained(
            "facebook/timesformer-base-finetuned-k400"
        ).to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 317
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    """Slow integration tests for the Flax Stable Diffusion 2 pipeline."""

    def tearDown(self):
        # Reclaim device memory between tests.
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        # Replicate parameters and shard the inputs across devices for pmap.
        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(
            model_id, subfolder="scheduler"
        )
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        # The scheduler state must be injected into the pipeline params.
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 317
| 1
|
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    """Sort the list of integers *a* in place using pigeonhole sort.

    Suitable when the value range (max - min) is small relative to len(a).
    """
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main():
    """Demo: sort a fixed sample list and print it."""
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    # str() conversion is required: " ".join only accepts strings.
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
| 719
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    """Build the `accelerate config` argument parser and its subcommands."""
    # Shared parent parser: help is supplied by the children, abbreviations
    # are disabled so subcommand flags stay unambiguous.
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser
def main():
    """CLI entry point: parse args and dispatch to the selected subcommand."""
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    # No subcommand chosen: show usage and exit non-zero.
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 346
| 0
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
# (the worker functions below reference it via `global process_lock`)
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Worker for odd-even transposition sort.

    Repeatedly exchanges ``value`` with the left/right neighbour over pipes,
    keeping the smaller value on the left. ``l_send``/``r_send`` are the send
    pipes, ``lr_cv``/``rr_cv`` the receive pipes; the final value is reported
    on ``result_pipe``. NOTE: the fixed range(0, 10) matches the 10-element
    demo list built in main().
    """
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sort *arr* with a parallel odd-even transposition network.

    One process is spawned per element; neighbours exchange values over
    pipes and the sorted values are collected from ``result_pipe``.
    Returns the sorted list (``arr`` is overwritten in place).
    """
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    """Demo: sort the reversed list 10..1 with odd-even transposition."""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
| 546
|
'''simple docstring'''
def is_arithmetic_series(series: list) -> bool:
    """Return True if *series* is an arithmetic progression.

    :raises ValueError: if *series* is not a list or is empty.
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    # A single element is trivially arithmetic.
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the numbers in *series*.

    :raises ValueError: if *series* is not a list or is empty.
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    # sum()/len() replaces the manual accumulation loop.
    return sum(series) / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 546
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ : Optional[int] = """▁"""
lowerCamelCase_ : List[str] = {"""vocab_file""": """spiece.model"""}
lowerCamelCase_ : Any = {
"""vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}
}
lowerCamelCase_ : List[Any] = {
"""google/pegasus-xsum""": 512,
}
lowerCamelCase_ : List[str] = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    """PEGASUS tokenizer backed by SentencePiece.

    Ids ``0 .. offset-1`` are reserved for special tokens (<pad>, </s>, the
    two mask tokens and the <unk_x> pretraining placeholders); SentencePiece
    ids are shifted up by ``offset`` accordingly.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            pad_token=pad_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }

        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )

        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})

        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        # SentencePiece pieces plus the reserved special-token range.
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload
        # from vocab_file in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id, honouring reserved specials."""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an id to a token (str), honouring reserved specials."""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens: List[str] = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False) -> int:
        # Only </s> is appended by build_inputs_with_special_tokens.
        return 1

    def _special_token_mask(self, seq: List[int]) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 246
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    """Deprecated alias of BeitImageProcessor.

    Warns on construction and otherwise forwards everything to the parent.
    NOTE(review): the warning category was lost in obfuscation; FutureWarning
    matches the standard transformers deprecation pattern — confirm.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 246
| 1
|
def interpolation_search(sorted_collection: list, item: int):
    """Search *item* in ascending *sorted_collection* by interpolation.

    Returns the index of a matching element, or None if absent.
    """
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        # Estimate the position assuming roughly uniform value spacing.
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection: list, item: int, left: int, right: int):
    """Recursive interpolation search of *item* within [left, right].

    Returns a matching index or None if absent.
    """
    # avoid division by zero when all values in the window are equal
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    """Return True if *collection* is ascending sorted, else raise ValueError."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
    import sys

    debug = 0
    # Define the collection unconditionally so the search below works even
    # when the debug sortedness check is disabled.
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 225
|
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Return the Levenshtein edit distance between *word1* and *word2*
    using memoized top-down recursion."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 225
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Lazy-import structure consumed by _LazyModule below.
_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    # NOTE(review): module/class names below look garbled by obfuscation
    # ("wavaveca") — confirm against the actual submodule file name.
    from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 707
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase_ = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE(BaseImageProcessor):
    """Image processor that resizes images down to the nearest multiple of
    ``size_divisor`` and optionally rescales pixel values to [0, 1].

    NOTE(review): the original declared every parameter under one duplicated
    name (a SyntaxError), defined all three methods under one name (so only
    the last survived), and inherited from its own undefined name; parameter
    and method names restored from the method bodies and the imported base.
    """

    # Name of the pixel input expected by models consuming this processor's
    # output (upstream convention for BaseImageProcessor subclasses).
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` so height and width become the closest *lower*
        multiples of ``size_divisor`` (floor division, never upscales)."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        # ``resize`` here is the module-level helper imported from
        # ...image_transforms, not this method.
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255 to map into [0, 1])."""
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        do_rescale: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Prepare one image or a batch of images for the model.

        Per-call arguments override the defaults captured at construction.
        Raises ValueError on invalid images or when resizing is requested
        without a ``size_divisor``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError('Invalid image(s)')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 320
| 0
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)

# Maps each submodule to the public names it exports (consumed by _LazyModule).
# NOTE(review): the original bound this dict and the backend-specific lists to
# throwaway names, so ``_import_structure`` was undefined at the _LazyModule
# call and the optional-backend entries were never registered; canonical
# lazy-init pattern restored. Type-checking imports realigned with the
# "speecht5" submodule names listed in the structure below.
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

# Tokenizer requires sentencepiece; register it only when available.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

# Modeling code requires torch; register it only when available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 140
|
from numpy import exp, pi, sqrt
def a__ ( x: float , mu: float = 0.0 , sigma: float = 1.0 ):
    """Return the Gaussian (normal) probability density at ``x``.

    ``mu`` is the mean and ``sigma`` the standard deviation of the
    distribution; defaults give the standard normal N(0, 1).

    NOTE(review): the original signature declared all three parameters under
    one duplicated name (a SyntaxError); names restored from the body.
    """
    # PDF of N(mu, sigma^2): exp(-(x - mu)^2 / (2 sigma^2)) / sqrt(2 pi sigma^2)
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
    # Run the module's doctest examples when executed as a script.
    import doctest
    doctest.testmod()
| 140
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class _a ( PretrainedConfig ):
    """Configuration class for MobileNetV2 models.

    Stores architecture hyperparameters (channel counts, depth multiplier,
    activation, etc.) consumed by the model implementation.

    NOTE(review): the original ``__init__`` declared every parameter under one
    duplicated name (a SyntaxError), bound values to locals instead of
    instance attributes, and inherited from the undefined name ``__a``;
    restored from the attribute names in the body and the imported base.
    """

    # Identifier used by the transformers config registry.
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # A non-positive multiplier would collapse every layer to zero width.
        if depth_multiplier <= 0:
            raise ValueError("""depth_multiplier must be greater than zero.""")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class _a ( OnnxConfig ):
    """ONNX export configuration for MobileNetV2.

    NOTE(review): this definition shadows the config class above (the
    renaming pass gave both classes the name ``_a``); consider restoring
    distinct names. Base restored from the undefined ``__a`` to the imported
    ``OnnxConfig``, and the three clobbered property names restored to the
    OnnxConfig interface they implement.
    """

    # Minimum torch version supporting this export path.
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping:
        """Model inputs and their dynamic axes (batch dimension only)."""
        return OrderedDict([("""pixel_values""", {0: """batch"""})])

    @property
    def outputs(self) -> Mapping:
        """Model outputs and their dynamic axes, depending on the task."""
        if self.task == "image-classification":
            return OrderedDict([("""logits""", {0: """batch"""})])
        else:
            return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})])

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating exported outputs."""
        return 1e-4
| 603
|
'''simple docstring'''
def A_ ( num ) ->int:
    """Return the multiplicative persistence of ``num``: the number of times
    its digits must be multiplied together before a single digit remains.

    Raises ValueError for non-integers and for negative values.

    NOTE(review): the original checked ``isinstance(x, x)`` (always a
    TypeError) and built the digit list from the whole number instead of each
    digit; both restored from the surviving body names.
    """
    if not isinstance(num, int):
        raise ValueError("""multiplicative_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""multiplicative_persistence() does not accept negative values""")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]  # one entry per digit
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def A_ ( num ) ->int:
    """Return the additive persistence of ``num``: the number of times its
    digits must be summed before a single digit remains.

    Raises ValueError for non-integers and for negative values.

    NOTE(review): same restoration as the multiplicative variant above —
    ``isinstance(x, x)`` and the digit-list construction were broken by the
    renaming pass. This definition also shadows the function above (both were
    named ``A_``); consider restoring distinct names.
    """
    if not isinstance(num, int):
        raise ValueError("""additive_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""additive_persistence() does not accept negative values""")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]  # one entry per digit
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
    # Run the module's doctest examples when executed as a script.
    import doctest
    doctest.testmod()
| 603
| 1
|
def A__ ( discount_rate: float, cash_flows: list[float] ) -> float:
    """Return the net present value of ``cash_flows`` discounted at
    ``discount_rate``, rounded to 2 decimal places.

    Cash flow ``i`` is discounted by ``(1 + discount_rate) ** i`` (the first
    flow, index 0, is undiscounted). Raises ValueError for a negative rate or
    an empty cash-flow list.

    NOTE(review): the original signature declared both parameters under one
    duplicated name (a SyntaxError); names restored from the body.
    """
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative')
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
    # Run the module's doctest examples when executed as a script.
    import doctest
    doctest.testmod()
| 305
|
import collections
import importlib.util
import os
import re
from pathlib import Path
_lowercase : List[Any] ='''src/transformers'''
# Matches is_xxx_available()
_lowercase : List[str] =re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
_lowercase : Any =re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_lowercase : Optional[int] =re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
_lowercase : int =re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
_lowercase : Tuple =re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_lowercase : str =re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
_lowercase : List[Any] =re.compile('''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
_lowercase : List[Any] =re.compile('''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
_lowercase : List[str] =re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
_lowercase : Any =re.compile(R'''^\s*try:''')
# Catches a line with else:
_lowercase : Optional[int] =re.compile(R'''^\s*else:''')
def A__ ( line ):
    """Return the backend name(s) required by an init ``line``, or None.

    A line like ``if not is_torch_available():`` yields ``"torch"``; several
    backends on one line are sorted and joined with ``"_and_"``.

    NOTE(review): the original bound the backend list to a throwaway name and
    then sorted an undefined variable; local name restored. The broken return
    annotation (``Optional`` is not imported in this file) was dropped.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def A__ ( init_file ):
    """Parse a transformers ``__init__.py`` and return a pair of dicts
    ``(import_dict_objects, type_hint_objects)`` mapping each backend name
    (or ``"none"``) to the object names registered for it, or ``None`` for a
    traditional init without an ``_import_structure``.

    NOTE(review): the renaming pass collapsed every local (``lines``,
    ``line_index``, ``objects``, ``line``, ...) into single rebound names,
    leaving each of them undefined at its use sites; locals restored from the
    surviving references. It also calls ``find_backend``, defined above under
    the obfuscated name ``A__`` — confirm the helper names when de-obfuscating
    the whole file.
    """
    with open(init_file, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('_import_structure = {'):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r'\[([^\]]+)\]', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(' ' * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING'):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(' ' * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(' ' * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('else')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', '))
        elif line.startswith(' ' * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def A__ ( import_dict_objects, type_hint_objects ):
    """Compare the ``_import_structure`` half of an init with its
    TYPE_CHECKING half and return a list of human-readable error strings
    (empty when the two halves agree).

    NOTE(review): the original signature declared both parameters under one
    duplicated name (a SyntaxError) and bound locals to throwaway names;
    restored from the surviving body references.
    """
    def find_duplicates(seq):
        # Names registered more than once on the same side.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}')

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = 'base imports' if key == 'none' else f'{key} backend'
            errors.append(f'Differences for {name}:')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f'  {a} in TYPE_HINT but not in _import_structure.')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f'  {a} in _import_structure but not in TYPE_HINT.')
    return errors
def A__ ( ):
    """Walk PATH_TO_TRANSFORMERS, parse every ``__init__.py`` and raise
    ValueError listing every init whose two halves disagree.

    NOTE(review): the original walked an undefined name and bound every local
    to a throwaway name; restored from the surviving body references. Calls
    ``parse_init``/``analyze_results``, defined above under the obfuscated
    name ``A__`` — confirm the helper names when de-obfuscating the file.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, '__init__.py')
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file name.
                    errors[0] = f'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append('\n'.join(errors))
    if len(failures) > 0:
        raise ValueError('\n\n'.join(failures))
def A__ ( ):
    """Return the dotted names of all transformers submodules found on disk
    under PATH_TO_TRANSFORMERS (skipping private folders and empty leftovers).

    NOTE(review): the original walked an undefined name and bound every local
    to a throwaway name; restored from the surviving body references.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_'):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob('*.py'))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, '.')
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace('.py', '').replace(os.path.sep, '.')
            # Only top-level modules (no dots) are collected here.
            if len(submodule.split('.')) == 1:
                submodules.append(submodule)
    return submodules
# Submodules intentionally excluded from the main transformers __init__.
# NOTE(review): originally bound to the throwaway name ``_lowercase`` while
# check_submodules references ``IGNORE_SUBMODULES``; name restored.
IGNORE_SUBMODULES = [
    '''convert_pytorch_checkpoint_to_tf2''',
    '''modeling_flax_pytorch_utils''',
]
def A__ ( ):
    """Load the repo's transformers ``__init__`` and raise ValueError if any
    submodule on disk is neither in ``_import_structure`` nor ignored.

    NOTE(review): locals restored from the surviving body references; calls
    ``get_transformers_submodules``, defined above under the obfuscated name
    ``A__`` — confirm the helper names when de-obfuscating the file.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        'transformers',
        os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = '\n'.join(f'- {module}' for module in module_not_registered)
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            f'{list_of_modules}\n'
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.'
        )
if __name__ == "__main__":
    # NOTE(review): the two functions called here are defined above under the
    # obfuscated name ``A__`` — these call names are undefined as written;
    # confirm the intended helper names when de-obfuscating the file.
    check_all_inits()
    check_submodules()
| 305
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __A (unittest.TestCase):
'''simple docstring'''
def __init__( self : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=7 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : List[Any]=18 , UpperCAmelCase_ : int=30 , UpperCAmelCase_ : List[Any]=400 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[int]=[0.5, 0.5, 0.5] , UpperCAmelCase_ : Optional[int]=[0.5, 0.5, 0.5] , UpperCAmelCase_ : int=False , ) ->str:
"""simple docstring"""
snake_case_ = size if size is not None else {"""height""": 20, """width""": 20}
snake_case_ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = image_size
snake_case_ = min_resolution
snake_case_ = max_resolution
snake_case_ = do_resize
snake_case_ = size
snake_case_ = do_center_crop
snake_case_ = crop_size
snake_case_ = do_normalize
snake_case_ = image_mean
snake_case_ = image_std
snake_case_ = do_reduce_labels
def lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def _a ( ):
    """Load one (image, segmentation map) pair from the ADE20k test fixtures.

    NOTE(review): the original bound both opened images to one throwaway name
    and then returned the undefined ``image`` and the builtin ``map``; locals
    restored. Renamed to ``prepare_semantic_single_inputs`` at the call sites
    below — this entry point keeps the obfuscated name for compatibility and
    delegates nothing; consider unifying the names when de-obfuscating.
    """
    dataset = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""")
    image = Image.open(dataset[0]["""file"""])
    seg_map = Image.open(dataset[1]["""file"""])
    return image, seg_map


# Alias matching the name used by the test class below.
prepare_semantic_single_inputs = _a
def _a ( ):
    """Load two (image, segmentation map) pairs from the ADE20k test fixtures
    and return them as ``([image1, image2], [map1, map2])``.

    NOTE(review): the original bound all four opened images to one throwaway
    name and returned undefined variables; locals restored. This definition
    also shadowed the single-input helper above (both were named ``_a``);
    an alias matching the test-class call site is provided.
    """
    ds = load_dataset("""hf-internal-testing/fixtures_ade20k""", split="""test""")
    image1 = Image.open(ds[0]["""file"""])
    map1 = Image.open(ds[1]["""file"""])
    image2 = Image.open(ds[2]["""file"""])
    map2 = Image.open(ds[3]["""file"""])
    return [image1, image2], [map1, map2]


# Alias matching the name used by the test class below.
prepare_semantic_batch_inputs = _a
@require_torch
@require_vision
class __A (snake_case__ , unittest.TestCase):
    '''Test suite for BeitImageProcessor: config round-trips, preprocessing of
    PIL / numpy / torch inputs, and segmentation-map handling.

    NOTE(review): several names referenced below do not resolve in this file
    after the renaming pass — the mixin base ``snake_case__``, the fixture
    class ``BeitImageProcessingTester`` (defined above as ``__A``), the
    helpers ``prepare_semantic_single_inputs``/``prepare_semantic_batch_inputs``
    (defined above as ``_a``), and locals bound to the throwaway name
    ``snake_case_`` but read back under their original names
    (``image_processing``, ``encoded_images``, ``encoding``, ``image_inputs``,
    ``maps``). Confirm against the upstream test file when de-obfuscating.
    '''

    # Image processor class under test; None when vision deps are missing.
    __lowercase: List[str] = BeitImageProcessor if is_vision_available() else None

    def lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
        """Create the shared tester fixture (upstream: ``setUp``)."""
        # NOTE(review): upstream assigns ``self.image_processor_tester`` — confirm.
        snake_case_ = BeitImageProcessingTester(self )

    @property
    def lowerCAmelCase ( self : Optional[int] ) ->Union[str, Any]:
        """Kwargs dict used to build the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase ( self : List[Any] ) ->List[str]:
        """Processor exposes every expected configuration attribute."""
        snake_case_ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) )
        self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) )
        self.assertTrue(hasattr(UpperCAmelCase_ , """do_center_crop""" ) )
        self.assertTrue(hasattr(UpperCAmelCase_ , """center_crop""" ) )
        self.assertTrue(hasattr(UpperCAmelCase_ , """do_normalize""" ) )
        self.assertTrue(hasattr(UpperCAmelCase_ , """image_mean""" ) )
        self.assertTrue(hasattr(UpperCAmelCase_ , """image_std""" ) )

    def lowerCAmelCase ( self : Tuple ) ->Dict:
        """``from_dict`` honors defaults and keyword overrides."""
        snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
        self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
        self.assertEqual(image_processor.do_reduce_labels , UpperCAmelCase_ )
        snake_case_ = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=UpperCAmelCase_ )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
        self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
        self.assertEqual(image_processor.do_reduce_labels , UpperCAmelCase_ )

    def lowerCAmelCase ( self : Tuple ) ->Dict:
        """Intentionally empty placeholder (upstream skips this check)."""
        pass

    def lowerCAmelCase ( self : Optional[int] ) ->int:
        """Preprocessing PIL images yields correctly shaped pixel_values."""
        snake_case_ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase_ , Image.Image )
        # Test not batched input
        snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

    def lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
        """Preprocessing numpy arrays yields correctly shaped pixel_values."""
        snake_case_ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
        # Test not batched input
        snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

    def lowerCAmelCase ( self : Tuple ) ->Tuple:
        """Preprocessing torch tensors yields correctly shaped pixel_values."""
        snake_case_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
        # Test not batched input
        snake_case_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        snake_case_ = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )

    def lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
        """Segmentation maps are encoded alongside images with matching shapes
        and long dtype, for tensor, single-PIL and batched-PIL inputs."""
        snake_case_ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
        snake_case_ = []
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
            maps.append(torch.zeros(image.shape[-2:] ).long() )
        # Test not batched input
        snake_case_ = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
        self.assertEqual(
            encoding["""pixel_values"""].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        self.assertEqual(
            encoding["""labels"""].shape , (
                1,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        self.assertEqual(encoding["""labels"""].dtype , torch.long )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 255 )
        # Test batched
        snake_case_ = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="""pt""" )
        self.assertEqual(
            encoding["""pixel_values"""].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        self.assertEqual(
            encoding["""labels"""].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        self.assertEqual(encoding["""labels"""].dtype , torch.long )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 255 )
        # Test not batched input (PIL images)
        snake_case_ , snake_case_ = prepare_semantic_single_inputs()
        snake_case_ = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="""pt""" )
        self.assertEqual(
            encoding["""pixel_values"""].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        self.assertEqual(
            encoding["""labels"""].shape , (
                1,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        self.assertEqual(encoding["""labels"""].dtype , torch.long )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 255 )
        # Test batched input (PIL images)
        snake_case_ , snake_case_ = prepare_semantic_batch_inputs()
        snake_case_ = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="""pt""" )
        self.assertEqual(
            encoding["""pixel_values"""].shape , (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        self.assertEqual(
            encoding["""labels"""].shape , (
                2,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        self.assertEqual(encoding["""labels"""].dtype , torch.long )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 255 )

    def lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
        """``reduce_labels`` remaps ADE20k labels; off keeps them <= 150,
        on allows the 255 ignore index."""
        snake_case_ = self.image_processing_class(**self.image_processor_dict )
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        snake_case_ , snake_case_ = prepare_semantic_single_inputs()
        snake_case_ = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="""pt""" )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 150 )
        # NOTE(review): upstream sets ``image_processing.do_reduce_labels = True``
        # here; the assignment target was lost in the renaming pass — confirm.
        snake_case_ = True
        snake_case_ = image_processing(UpperCAmelCase_ , UpperCAmelCase_ , return_tensors="""pt""" )
        self.assertTrue(encoding["""labels"""].min().item() >= 0 )
        self.assertTrue(encoding["""labels"""].max().item() <= 255 )
| 702
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module-level logger for this configuration module.
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)

# NOTE(review): this mapping reuses the logger's variable name and therefore
# clobbers it; upstream these were distinct names (``logger`` and the
# pretrained-config archive map) — confirm when de-obfuscating.
__SCREAMING_SNAKE_CASE : str = {
    'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __A (snake_case__):
    """Configuration class for the M-CTC-T speech-recognition model.

    Holds the hyper-parameters needed to instantiate the model; the defaults
    correspond to the large architecture referenced in the archive map above.
    The original (obfuscated) version gave every parameter the same name —
    a SyntaxError — and never stored the values on ``self``; both are fixed
    here while keeping the same defaults, order and validation.
    """

    # model_type identifier used by the auto classes
    __lowercase: Any = """mctct"""

    def __init__(
        self,
        vocab_size=8_065,
        hidden_size=1_536,
        num_hidden_layers=36,
        intermediate_size=6_144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        # Token ids are forwarded so the base config records them as well.
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
                F"""but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, """
                F"""`config.num_conv_layers = {self.num_conv_layers}`.""")
| 2
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
# NOTE(review): the map below rebinds the same mangled name as the logger,
# clobbering it — confirm the intended distinct names upstream.
# Canonical checkpoint name -> hosted config URL.
UpperCAmelCase__ : Any = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class __lowercase ( SCREAMING_SNAKE_CASE__ ):
    """Configuration class for the LeViT vision model.

    The original (obfuscated) version referenced an undefined ``_lowercase`` in
    the ``super().__init__`` call and never stored its arguments on ``self``;
    both are fixed here while keeping the same defaults and attribute order.
    """

    # model_type identifier used by the auto classes
    __UpperCAmelCase = '''levit'''

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Down-sampling ("Subsample") operations between the three stages.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class __lowercase ( SCREAMING_SNAKE_CASE__ ):
    """ONNX export configuration for LeViT."""

    # minimum torch version required to export this model to ONNX
    __UpperCAmelCase = version.parse('''1.11''' )

    @property
    def _a ( self) -> Mapping[str, Mapping[int, str]]:
        """ONNX input spec: pixel_values with dynamic batch/channel/height/width axes."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    # NOTE(review): this second property shares the (mangled) name `_a`, so it
    # shadows the input-spec property above — confirm the intended distinct
    # names (e.g. `inputs` / `atol_for_validation`) upstream.
    @property
    def _a ( self) -> float:
        """Absolute tolerance used when validating exported model outputs."""
        return 1e-4
| 313
|
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename one flax parameter key/tensor pair to its PyTorch equivalent.

    - 3-d ``kernel`` (expert layer): renamed to ``weight`` and the two feature
      dims are swapped per expert via ``torch.permute``.
    - ``kernel`` (linear layer): renamed to ``weight`` and transposed.
    - ``scale`` / ``embedding``: renamed to ``weight``; tensor untouched.

    Returns the (possibly renamed) key tuple and the (possibly transformed)
    tensor.  The original (obfuscated) version named both parameters
    identically — a SyntaxError — and the caller below refers to this function
    as ``rename_base_flax_keys``, so it is defined under that name with the
    legacy alias preserved.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    # NOTE: `".".join(flax_key_tuple)` is truthy for any non-empty tuple, so this
    # branch effectively matches every remaining 2-d "kernel"; kept as-is.
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor


# Keep the historical (obfuscated) module-level name bound to the same function.
__SCREAMING_SNAKE_CASE = rename_base_flax_keys
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split a raw tensorstore layer path into its addressable pieces.

    Returns ``(curr_real_layer_name, split_layer, content)`` where
    ``curr_real_layer_name`` is the layer path with the trailing
    metadata/kvstore/leaf component removed, ``split_layer`` addresses that
    trailing component, and ``content`` is the value to store (an absolute
    file path for ``kvstore/path`` entries, the literal ``"file"`` driver for
    ``kvstore/driver`` entries, or the raw checkpoint value otherwise).

    The original (obfuscated) version named all three parameters identically —
    a SyntaxError; the parameter order is taken from the call site in the
    sharding function.  The caller refers to this function as
    ``get_key_and_tensorstore_dict``; the legacy alias is preserved below.
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]  # drop trailing "/"
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]  # drop trailing "/"
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        # resolve the data file relative to the checkpoint directory
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content


# Keep the historical (obfuscated) module-level name bound to the same function.
__SCREAMING_SNAKE_CASE = get_key_and_tensorstore_dict
def rename_and_save_block(current_block, save_path):
    """Rename the keys of a weight block via ``rename_keys`` and save it with torch.

    The original (obfuscated) version named both parameters identically — a
    SyntaxError — and its copy loop assigned into a throwaway name.  The loop
    is reconstructed as a plain key-preserving copy; NOTE(review): confirm
    upstream whether a key transformation was intended inside this loop.
    The caller refers to this function as ``rename_and_save_block``; the
    legacy alias is preserved below.
    """
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


# Keep the historical (obfuscated) module-level name bound to the same function.
__SCREAMING_SNAKE_CASE = rename_and_save_block
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: Tuple , lowerCamelCase_: Optional[Any] , lowerCamelCase_: Dict , lowerCamelCase_: int , lowerCamelCase_: str = WEIGHTS_NAME ):
    """Shard a T5X Switch-Transformers checkpoint into PyTorch weight files on the fly.

    NOTE(review): parameter names are mangled (all identical — a SyntaxError as
    written); from the call in ``__main__`` they appear to be
    (switch_checkpoint_path, pytorch_dump_path, max_shard_size, dtype) plus the
    weights file name — confirm against the original script.  Reads the
    msgpack/tensorstore checkpoint layer by layer, converts each tensor to
    torch, accumulates blocks up to the shard-size budget, saves each shard and
    finally writes a ``weight_map`` index file.  Returns ``(metadata, index)``;
    ``index`` is None when everything fits in one shard.
    """
    snake_case : List[str] = convert_file_size_to_int(lowerCamelCase_ )  # shard budget in bytes
    snake_case : List[Any] = []
    snake_case : Dict = {}
    snake_case : str = 0
    snake_case : List[str] = 0
    os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
    with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
        # the msgpack checkpoint stores parameters under optimizer/target
        snake_case : List[Any] = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
        snake_case : Union[str, Any] = flatten_dict(lowerCamelCase_ , sep="/" )
    snake_case : Optional[int] = {}
    # group the raw tensorstore entries by their real layer name
    for layer in checkpoint_info.keys():
        snake_case , snake_case , snake_case : Union[str, Any] = get_key_and_tensorstore_dict(
            lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
        if curr_real_layer_name in all_layers:
            snake_case : str = content
        else:
            snake_case : Any = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        snake_case : Optional[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        snake_case : Tuple = torch.tensor(lowerCamelCase_ )
        snake_case : str = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        snake_case , snake_case : Dict = rename_base_flax_keys(tuple(key.split("/" ) ) , lowerCamelCase_ )
        snake_case : Union[str, Any] = "/".join(lowerCamelCase_ )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            snake_case : str = os.path.join(
                lowerCamelCase_ , weights_name.replace(".bin" , f'''-{len(lowerCamelCase_ )+1:05d}-of-???.bin''' ) )
            rename_and_save_block(lowerCamelCase_ , lowerCamelCase_ )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            snake_case : Any = {}
            snake_case : Union[str, Any] = 0
        # cast to the requested dtype before accounting for it
        snake_case : Any = raw_weights.to(getattr(lowerCamelCase_ , lowerCamelCase_ ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    snake_case : List[Any] = os.path.join(lowerCamelCase_ , weights_name.replace(".bin" , f'''-{len(lowerCamelCase_ )+1:05d}-of-???.bin''' ) )
    rename_and_save_block(lowerCamelCase_ , lowerCamelCase_ )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(lowerCamelCase_ ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    snake_case : List[Any] = {}
    snake_case : Dict = {}
    for idx, shard in enumerate(lowerCamelCase_ ):
        # rename the placeholder "???" shard files now that the total is known
        snake_case : List[Any] = weights_name.replace(
            ".bin" , f'''-{idx+1:05d}-of-{len(lowerCamelCase_ ):05d}.bin''' ) # len(sharded_state_dicts):05d}
        snake_case : Tuple = os.path.join(lowerCamelCase_ , weights_name.replace(".bin" , f'''-{idx+1:05d}-of-???.bin''' ) )
        os.rename(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
        snake_case : Union[str, Any] = shard
        for key in shard:
            snake_case : List[Any] = shard_file
    # Add the metadata
    snake_case : Optional[int] = {"total_size": total_size}
    snake_case : List[str] = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , "w" , encoding="utf-8" ) as f:
        snake_case : Tuple = json.dumps(lowerCamelCase_ , indent=2 , sort_keys=lowerCamelCase_ ) + "\n"
        f.write(lowerCamelCase_ )
    return metadata, index
if __name__ == "__main__":
    # The original driver bound the parser to `A` but registered arguments on an
    # undefined `parser`, then read `args.switch_tax_checkpoint_path` although
    # the flag is `--switch_t5x_checkpoint_path`, and called the never-defined
    # name `shard_on_the_fly`.  All three are fixed here; the sharding function
    # is still bound to `__SCREAMING_SNAKE_CASE` at this point in the module.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
    parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    A = parser.parse_args()
    __SCREAMING_SNAKE_CASE(
        A.switch_t5x_checkpoint_path,
        A.pytorch_dump_folder_path,
        A.max_shard_size,
        A.dtype,
    )
def __SCREAMING_SNAKE_CASE ( ):
    """Manual sanity check: round-trip a switch-base-8 model through save/load and generate once.

    Loads the converted checkpoint with `device_map="auto"`, runs a single
    fill-in-the-blank generation and prints the decoded output.  Imports are
    local because this helper is only meant to be run by hand.
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
    snake_case : List[Any] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
    snake_case : List[Any] = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
    snake_case : Dict = TaTokenizer.from_pretrained("t5-small" )
    snake_case : Tuple = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    # NOTE(review): names are mangled — `config`, `model`, `tokenizer` below are
    # presumably the values assigned above; confirm upstream.
    snake_case : Dict = tokenizer(lowerCamelCase_ , return_tensors="pt" ).input_ids
    snake_case : Optional[int] = model.generate(lowerCamelCase_ , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
| 449
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module bootstrap for the MGP-STR model package.  The original
# (obfuscated) version rebound the same name for the structure dict and the
# modeling list (clobbering the former) and passed an undefined
# `_import_structure` to `_LazyModule`; the canonical pattern is restored.
_import_structure = {
    '''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
    '''processing_mgp_str''': ['''MgpstrProcessor'''],
    '''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # modeling classes are only importable when torch is installed
    _import_structure['''modeling_mgp_str'''] = [
        '''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MgpstrModel''',
        '''MgpstrPreTrainedModel''',
        '''MgpstrForSceneTextRecognition''',
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; keep the historical `lowercase`
    # binding pointing at the same object.
    lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = lowercase
|
'''simple docstring'''
from __future__ import annotations
lowercase = []  # accumulates every completed board found by the solver
# The solver and the final report refer to this list as `solution`, but the
# original (obfuscated) code only ever bound it to `lowercase` — alias it so
# those references resolve.
solution = lowercase
def is_safe(board, row, column):
    """Return True if a queen placed at (row, column) is not attacked.

    Checks the full row and column plus the two upward diagonals; rows below
    `row` are empty by construction since queens are placed top-down.  The
    original (obfuscated) version named all three parameters identically — a
    SyntaxError — and the solver calls this function as ``is_safe``; the
    legacy binding is preserved below.
    """
    for i in range(len(board)):
        if board[row][i] == 1:  # another queen in this row
            return False
    for i in range(len(board)):
        if board[i][column] == 1:  # another queen in this column
            return False
    # upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


# Keep the historical (obfuscated) module-level name bound to the same function.
UpperCAmelCase_ = is_safe
def solve(board, row):
    """Place queens row by row with backtracking.

    Every complete placement is appended to the module-level `solution` list
    and printed.  The original (obfuscated) version named both parameters
    identically — a SyntaxError — and is called as ``solve`` by the driver;
    the legacy binding is preserved below.
    """
    if row >= len(board):
        # all rows filled: record and display this solution
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1  # place queen, recurse, then backtrack
            solve(board, row + 1)
            board[row][i] = 0
    return False


# Keep the historical (obfuscated) module-level name bound to the same function.
UpperCAmelCase_ = solve
def printboard(board):
    """Print the board: 'Q' for a queen, '.' for an empty square.

    The driver calls this function as ``printboard``; the original
    (obfuscated) module never defined that name, so the legacy binding is
    preserved below.
    """
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()  # newline after each row


# Keep the historical (obfuscated) module-level name bound to the same function.
UpperCAmelCase_ = printboard
# n=int(input("The no. of queens"))
# NOTE(review): names are mangled below — the board size is bound to
# `lowercase` but the comprehension reads `n`, the solver is invoked as
# `solve(board, 0)` although the board was bound to `lowercase`, and the final
# print reads `solution`; as written this driver raises NameError.  Confirm the
# intended names (n / board / solve / solution) upstream.
lowercase = 8
lowercase = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
| 41
| 1
|
def UpperCamelCase(__lowercase: list) -> list:
    """Sort a list of comparables in ascending order with pancake sort and return it.

    Repeatedly flips the maximum of the unsorted prefix to the front and then
    flips the whole prefix, sinking the maximum to its final position.  The
    original body read an undefined name (`lowerCamelCase_`) instead of the
    parameter — a NameError on any call — which is fixed here; the driver
    below calls this function as ``pancake_sort``, so that alias is added.
    """
    arr = __lowercase
    cur = len(arr)  # length of the still-unsorted prefix
    while cur > 1:
        # Find the maximum number in the unsorted prefix arr[:cur]
        mi = arr.index(max(arr[0:cur]))
        # Flip it to the front ...
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # ... then flip the whole prefix, sending it to position cur - 1
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


# Name used by the __main__ driver below.
pancake_sort = UpperCamelCase
if __name__ == "__main__":
    # NOTE(review): names are mangled — the input string and the parsed list are
    # both bound to `_UpperCAmelCase` (the second clobbers the first), while the
    # code reads `user_input` / `unsorted` / `pancake_sort`, none of which this
    # block defines; as written this driver raises NameError.  Confirm intended
    # names upstream.
    _UpperCAmelCase = input("""Enter numbers separated by a comma:\n""").strip()
    _UpperCAmelCase = [int(item) for item in user_input.split(""",""")]
    print(pancake_sort(unsorted))
| 558
|
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """ , lowerCamelCase__ , )
class lowerCamelCase__ ( lowerCamelCase__):
    """DeeBERT-style RoBERTa encoder: the DeeBERT body with RoBERTa embeddings swapped in."""
    # config class / base-model prefix used by the pretrained-model machinery
    snake_case_ =RobertaConfig
    snake_case_ ="""roberta"""

    def __init__(self ,__lowerCamelCase ) -> int:
        """Build the model from `config`, replace the embeddings, and (re)initialize weights."""
        super().__init__(__lowerCamelCase )
        lowerCAmelCase__ : Any = RobertaEmbeddings(__lowerCamelCase )
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. """ , lowerCamelCase__ , )
class lowerCamelCase__ ( lowerCamelCase__):
    """Sequence classifier over DeeRoBERTa that also trains/evaluates the highway (early-exit) heads."""
    # config class / base-model prefix used by the pretrained-model machinery
    snake_case_ =RobertaConfig
    snake_case_ ="""roberta"""

    def __init__(self ,__lowerCamelCase ) -> List[Any]:
        """Create the DeeRoBERTa encoder, a dropout layer and the final linear classifier."""
        super().__init__(__lowerCamelCase )
        lowerCAmelCase__ : Any = config.num_labels
        lowerCAmelCase__ : List[Any] = config.num_hidden_layers
        lowerCAmelCase__ : int = DeeRobertaModel(__lowerCamelCase )
        lowerCAmelCase__ : Any = nn.Dropout(config.hidden_dropout_prob )
        lowerCAmelCase__ : Dict = nn.Linear(config.hidden_size ,self.config.num_labels )

    # NOTE(review): the decorator argument is mangled — presumably
    # ROBERTA_INPUTS_DOCSTRING; `__lowerCamelCase` is undefined at class scope.
    @add_start_docstrings_to_model_forward(__lowerCamelCase )
    def lowerCAmelCase__ (self ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=None ,__lowerCamelCase=-1 ,__lowerCamelCase=False ,) -> Any:
        """Forward pass with early-exit support.

        Runs the encoder (catching `HighwayException` when an early exit
        fires), computes the main classification loss and one loss per highway
        head.  With `train_highway=True` the summed highway losses replace the
        main loss; at eval time entropies and the exit layer are appended to
        the outputs, and `output_layer >= 0` selects a specific highway's
        logits.
        """
        lowerCAmelCase__ : Optional[Any] = self.num_layers
        try:
            lowerCAmelCase__ : Union[str, Any] = self.roberta(
                __lowerCamelCase ,attention_mask=__lowerCamelCase ,token_type_ids=__lowerCamelCase ,position_ids=__lowerCamelCase ,head_mask=__lowerCamelCase ,inputs_embeds=__lowerCamelCase ,)
            lowerCAmelCase__ : Optional[int] = outputs[1]
            lowerCAmelCase__ : str = self.dropout(__lowerCamelCase )
            lowerCAmelCase__ : Union[str, Any] = self.classifier(__lowerCamelCase )
            lowerCAmelCase__ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
        except HighwayException as e:
            # an intermediate layer decided to exit early
            lowerCAmelCase__ : Union[str, Any] = e.message
            lowerCAmelCase__ : Tuple = e.exit_layer
            lowerCAmelCase__ : str = outputs[0]
        if not self.training:
            lowerCAmelCase__ : Optional[int] = entropy(__lowerCamelCase )
            lowerCAmelCase__ : Any = []
            lowerCAmelCase__ : List[Any] = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                lowerCAmelCase__ : Dict = MSELoss()
                lowerCAmelCase__ : List[Any] = loss_fct(logits.view(-1 ) ,labels.view(-1 ) )
            else:
                lowerCAmelCase__ : Any = CrossEntropyLoss()
                lowerCAmelCase__ : str = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
            # work with highway exits
            lowerCAmelCase__ : Dict = []
            for highway_exit in outputs[-1]:
                lowerCAmelCase__ : Optional[int] = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(__lowerCamelCase )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    lowerCAmelCase__ : int = MSELoss()
                    lowerCAmelCase__ : int = loss_fct(highway_logits.view(-1 ) ,labels.view(-1 ) )
                else:
                    lowerCAmelCase__ : Union[str, Any] = CrossEntropyLoss()
                    lowerCAmelCase__ : Dict = loss_fct(highway_logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
                highway_losses.append(__lowerCamelCase )
            if train_highway:
                lowerCAmelCase__ : Dict = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                lowerCAmelCase__ : Tuple = (loss,) + outputs
        if not self.training:
            lowerCAmelCase__ : Union[str, Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                lowerCAmelCase__ : Tuple = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                ) # use the highway of the last layer
        return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 647
| 0
|
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class __lowerCAmelCase :
    """Test helper: builds TrOCR decoder configs/dummy inputs and verifies past-key-value caching."""

    def __init__( self : Any , _snake_case : List[Any] , _snake_case : Dict=99 , _snake_case : List[str]=13 , _snake_case : List[Any]=16 , _snake_case : List[Any]=7 , _snake_case : Tuple=True , _snake_case : Optional[Any]=True , _snake_case : Dict=True , _snake_case : Dict=False , _snake_case : int=True , _snake_case : Union[str, Any]=2 , _snake_case : Optional[int]=32 , _snake_case : Tuple=4 , _snake_case : Tuple=4 , _snake_case : Dict=30 , _snake_case : Tuple=0 , _snake_case : Dict=1 , _snake_case : List[Any]=2 , _snake_case : List[str]=None , ):
        """Store the hyper-parameters used to build configs and dummy inputs.

        NOTE(review): parameter names are mangled (all `_snake_case`); the body
        reads names such as `parent`, `batch_size`, `vocab_size`, … which the
        signature never binds — confirm the intended parameter names upstream.
        """
        A__ = parent
        A__ = batch_size
        A__ = decoder_seq_length
        # For common tests
        A__ = self.decoder_seq_length
        A__ = is_training
        A__ = use_attention_mask
        A__ = use_labels
        A__ = vocab_size
        A__ = d_model
        A__ = d_model
        A__ = decoder_layers
        A__ = decoder_layers
        A__ = decoder_ffn_dim
        A__ = decoder_attention_heads
        A__ = decoder_attention_heads
        A__ = eos_token_id
        A__ = bos_token_id
        A__ = pad_token_id
        A__ = decoder_start_token_id
        A__ = use_cache
        A__ = max_position_embeddings
        A__ = None
        A__ = decoder_seq_length
        A__ = 2
        A__ = 1

    def _a ( self : List[Any] ):
        """Build (config, input_ids, attention_mask, lm_labels) from the stored hyper-parameters."""
        A__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        A__ = None
        if self.use_attention_mask:
            A__ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        A__ = None
        if self.use_labels:
            A__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        A__ = TrOCRConfig(
            vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
        return (config, input_ids, attention_mask, lm_labels)

    def _a ( self : List[Any] , _snake_case : Dict , _snake_case : List[Any] , _snake_case : List[str] , _snake_case : List[str] , ):
        """Check that decoding with cached past key values matches decoding from scratch."""
        A__ = True
        A__ = TrOCRDecoder(config=_snake_case ).to(_snake_case ).eval()
        A__ = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        A__ = model(_snake_case , use_cache=_snake_case )
        A__ = model(_snake_case )
        A__ = model(_snake_case , use_cache=_snake_case )
        self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) )
        self.parent.assertTrue(len(_snake_case ) == len(_snake_case ) + 1 )
        A__ = outputs['past_key_values']
        # create hypothetical next token and extent to next_input_ids
        A__ = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids and
        A__ = torch.cat([input_ids, next_tokens] , dim=-1 )
        A__ = model(_snake_case )['last_hidden_state']
        A__ = model(_snake_case , past_key_values=_snake_case )['last_hidden_state']
        # select random slice
        A__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        A__ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        A__ = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(_snake_case , _snake_case , atol=1E-3 )

    def _a ( self : Optional[int] ):
        """Return (config, inputs_dict) in the shape the common model tests expect."""
        A__ = self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ = config_and_inputs
        A__ = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
    """Standalone-decoder test suite for TrOCR (config tests + past-key-value decoding)."""
    # model classes exercised by the common tests (guarded on torch availability)
    A__ : Any = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    A__ : List[str] = (TrOCRForCausalLM,) if is_torch_available() else ()
    A__ : List[str] = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    A__ : Dict = True
    A__ : str = False

    def _a ( self : Tuple ):
        """Set up the model tester and config tester.

        NOTE(review): `TrOCRStandaloneDecoderModelTester` is never defined in
        this module (the helper class above is name-mangled), and `_snake_case`
        is undefined here — confirm the intended names upstream.
        """
        A__ = TrOCRStandaloneDecoderModelTester(self , is_training=_snake_case )
        A__ = ConfigTester(self , config_class=_snake_case )

    def _a ( self : Tuple ):
        """Intentionally skipped common test (not applicable to the standalone decoder)."""
        pass

    def _a ( self : Optional[Any] ):
        """Intentionally skipped common test (not applicable to the standalone decoder)."""
        pass

    def _a ( self : Optional[int] ):
        """Intentionally skipped common test (not applicable to the standalone decoder)."""
        pass

    def _a ( self : List[Any] ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def _a ( self : Optional[Any] ):
        """Past-key-value decoding must match decoding from scratch."""
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*_snake_case )

    def _a ( self : Any ):
        """No retain-grad hidden-state test for the decoder-only setup."""
        return

    @unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
    def _a ( self : Union[str, Any] ):
        """Skipped: left padding is unsupported by this model."""
        pass
| 52
|
from typing import TYPE_CHECKING
from ..utils import _LazyModule
# Lazy-module bootstrap for the onnx subpackage.  The original (obfuscated)
# version passed an undefined `_import_structure` to `_LazyModule` and imported
# a misspelled `OnnxSeqaSeqConfigWithPast`; both are fixed here.
_import_structure = {
    '''config''': [
        '''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
        '''OnnxConfig''',
        '''OnnxConfigWithPast''',
        '''OnnxSeq2SeqConfigWithPast''',
        '''PatchingSpec''',
    ],
    '''convert''': ['''export''', '''validate_model_outputs'''],
    '''features''': ['''FeaturesManager'''],
    '''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Replace this module with a lazy proxy; keep the historical binding too.
    SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = SCREAMING_SNAKE_CASE__
| 52
| 1
|
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
# Module-level logger.
a_ = logging.getLogger(__name__)
# NOTE(review): the two rebindings of `a_` below clobber the logger, and the
# generator reads `MODEL_CONFIG_CLASSES`, which is never defined under that
# name here — confirm the intended distinct names upstream.
a_ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
a_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowercase :
    """Arguments for selecting the model/config/tokenizer to fine-tune or train from scratch."""
    # model checkpoint (or None to train from scratch)
    _A : Dict = field(
        default=UpperCamelCase__ , metadata={
            """help""": (
                """The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
            )
        } , )
    # model type when training from scratch
    _A : int = field(
        default=UpperCamelCase__ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCamelCase__)} , )
    # config overrides, e.g. "n_embd=10,resid_pdrop=0.2"
    _A : List[str] = field(
        default=UpperCamelCase__ , metadata={
            """help""": (
                """Override some existing default config settings when a model is trained from scratch. Example: """
                """n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
            )
        } , )
    _A : str = field(
        default=UpperCamelCase__ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
    _A : Tuple = field(
        default=UpperCamelCase__ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""})
    _A : Dict = field(
        default=UpperCamelCase__ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
    _A : Optional[int] = field(
        default=UpperCamelCase__ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
    _A : Optional[int] = field(
        default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
    _A : Any = field(
        default=UpperCamelCase__ , metadata={
            """help""": (
                """Will use the token generated when running `huggingface-cli login` (necessary to use this script """
                """with private models)."""
            )
        } , )

    def __UpperCamelCase (self ):
        """Reject --config_overrides combined with --config_name or --model_name_or_path."""
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                """--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class __lowercase :
    """Arguments describing the training/eval data and preprocessing for whole-word-mask MLM."""
    _A : Any = field(
        default=UpperCamelCase__ , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""})
    _A : Optional[Any] = field(
        default=UpperCamelCase__ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""})
    _A : Dict = field(default=UpperCamelCase__ , metadata={"""help""": """The input training data file (a text file)."""})
    _A : Any = field(
        default=UpperCamelCase__ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
    # reference files carry the Chinese whole-word segmentation boundaries
    _A : int = field(
        default=UpperCamelCase__ , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
    _A : Optional[int] = field(
        default=UpperCamelCase__ , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
    _A : str = field(
        default=UpperCamelCase__ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""})
    _A : Tuple = field(
        default=5 , metadata={
            """help""": """The percentage of the train set used as validation set in case there's no validation split"""
        } , )
    _A : List[Any] = field(
        default=UpperCamelCase__ , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated. Default to the max input length of the model."""
            )
        } , )
    _A : Optional[Any] = field(
        default=UpperCamelCase__ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
    _A : Dict = field(
        default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""})
    _A : Any = field(
        default=UpperCamelCase__ , metadata={
            """help""": (
                """Whether to pad all samples to `max_seq_length`. """
                """If False, will pad the samples dynamically when batching to the maximum length in the batch."""
            )
        } , )

    def __UpperCamelCase (self ):
        """Validate that any provided data files have a csv/json/txt extension."""
        if self.train_file is not None:
            snake_case_ : Dict = self.train_file.split(""".""" )[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            snake_case_ : Union[str, Any] = self.validation_file.split(""".""" )[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(ds, ref_file):
    """Attach Chinese whole-word-masking references to a dataset.

    Reads one JSON object per non-blank line from ``ref_file`` and stores them
    in a new ``chinese_ref`` column, returning a fresh ``Dataset``.  The number
    of reference lines must match the number of dataset rows.  The original
    (obfuscated) version gave the function and both parameters the same name —
    a SyntaxError — which is fixed here; the legacy binding is preserved below.
    """
    with open(ref_file, """r""", encoding="""utf-8""") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(ds) == len(refs)
    dataset_dict = {c: ds[c] for c in ds.column_names}
    dataset_dict["""chinese_ref"""] = refs
    return Dataset.from_dict(dataset_dict)


# Keep the historical (obfuscated) module-level name bound to the same function.
SCREAMING_SNAKE_CASE__ = add_chinese_references
def SCREAMING_SNAKE_CASE__ ( ):
    """Entry point for whole-word-masking MLM training.

    Parses model/data/training arguments, loads and tokenizes the datasets,
    optionally attaches Chinese whole-word reference files, then trains and
    evaluates an ``AutoModelForMaskedLM`` via ``Trainer``; returns the
    evaluation ``results`` dict (perplexity under ``"perplexity"``).

    NOTE(review): local names in this fragment are scrambled — values are
    assigned to ``snake_case_`` but read back under their original names
    (``parser``, ``training_args``, ``datasets``, ``config``, ``tokenizer``,
    ``model``, ``trainer``, ...); confirm against the original run_mlm_wwm
    script before running.
    """
    snake_case_ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        snake_case_ : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        snake_case_ : Optional[Any] = parser.parse_args_into_dataclasses()
    # Detecting last checkpoint.
    snake_case_ : List[str] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        snake_case_ : Optional[int] = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                """Use --overwrite_output_dir to overcome.""" )
        elif last_checkpoint is not None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""" , SCREAMING_SNAKE_CASE__ )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        snake_case_ : Union[str, Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
        if "validation" not in datasets.keys():
            snake_case_ : int = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=f'train[:{data_args.validation_split_percentage}%]' , )
            snake_case_ : Optional[Any] = load_dataset(
                data_args.dataset_name , data_args.dataset_config_name , split=f'train[{data_args.validation_split_percentage}%:]' , )
    else:
        snake_case_ : str = {}
        if data_args.train_file is not None:
            snake_case_ : List[Any] = data_args.train_file
        if data_args.validation_file is not None:
            snake_case_ : str = data_args.validation_file
        snake_case_ : List[Any] = data_args.train_file.split(""".""" )[-1]
        if extension == "txt":
            snake_case_ : List[str] = """text"""
        snake_case_ : Optional[Any] = load_dataset(SCREAMING_SNAKE_CASE__ , data_files=SCREAMING_SNAKE_CASE__ )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    snake_case_ : List[Any] = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        snake_case_ : Tuple = AutoConfig.from_pretrained(model_args.config_name , **SCREAMING_SNAKE_CASE__ )
    elif model_args.model_name_or_path:
        snake_case_ : int = AutoConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ )
    else:
        snake_case_ : Any = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("""You are instantiating a new config instance from scratch.""" )
        if model_args.config_overrides is not None:
            logger.info(f'Overriding config: {model_args.config_overrides}' )
            config.update_from_string(model_args.config_overrides )
            logger.info(f'New config: {config}' )
    snake_case_ : Any = {
        """cache_dir""": model_args.cache_dir,
        """use_fast""": model_args.use_fast_tokenizer,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        snake_case_ : List[str] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **SCREAMING_SNAKE_CASE__ )
    elif model_args.model_name_or_path:
        snake_case_ : Dict = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ )
    else:
        raise ValueError(
            """You are instantiating a new tokenizer from scratch. This is not supported by this script."""
            """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
    if model_args.model_name_or_path:
        snake_case_ : List[str] = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        snake_case_ : List[str] = AutoModelForMaskedLM.from_config(SCREAMING_SNAKE_CASE__ )
    model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE__ ) )
    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        snake_case_ : Union[str, Any] = datasets["""train"""].column_names
    else:
        snake_case_ : List[str] = datasets["""validation"""].column_names
    snake_case_ : Optional[Any] = """text""" if """text""" in column_names else column_names[0]
    snake_case_ : Union[str, Any] = """max_length""" if data_args.pad_to_max_length else False
    def tokenize_function(SCREAMING_SNAKE_CASE__ : Dict ):
        # Remove empty lines
        snake_case_ : Tuple = [line for line in examples["""text"""] if len(SCREAMING_SNAKE_CASE__ ) > 0 and not line.isspace()]
        return tokenizer(examples["""text"""] , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=data_args.max_seq_length )
    snake_case_ : List[Any] = datasets.map(
        SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        snake_case_ : str = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
    if data_args.validation_ref_file is not None:
        snake_case_ : List[Any] = add_chinese_references(
            tokenized_datasets["""validation"""] , data_args.validation_ref_file )
    # If we have ref files, need to avoid it removed by trainer
    snake_case_ : str = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        snake_case_ : Optional[Any] = False
    # Data collator
    # This one will take care of randomly masking the tokens.
    snake_case_ : Any = DataCollatorForWholeWordMask(tokenizer=SCREAMING_SNAKE_CASE__ , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    snake_case_ : Tuple = Trainer(
        model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE__ , data_collator=SCREAMING_SNAKE_CASE__ , )
    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            snake_case_ : str = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
            snake_case_ : Union[str, Any] = model_args.model_name_or_path
        else:
            snake_case_ : int = None
        snake_case_ : Dict = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        snake_case_ : List[str] = os.path.join(training_args.output_dir , """train_results.txt""" )
        if trainer.is_world_process_zero():
            with open(SCREAMING_SNAKE_CASE__ , """w""" ) as writer:
                logger.info("""***** Train results *****""" )
                for key, value in sorted(train_result.metrics.items() ):
                    logger.info(f' {key} = {value}' )
                    writer.write(f'{key} = {value}\n' )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
    # Evaluation
    snake_case_ : Dict = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        snake_case_ : Tuple = trainer.evaluate()
        snake_case_ : Optional[int] = math.exp(eval_output["""eval_loss"""] )
        snake_case_ : List[Any] = perplexity
        snake_case_ : Optional[int] = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
        if trainer.is_world_process_zero():
            with open(SCREAMING_SNAKE_CASE__ , """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in sorted(results.items() ):
                    logger.info(f' {key} = {value}' )
                    writer.write(f'{key} = {value}\n' )
    return results
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
    """Per-process spawn entry point (originally the xla `_mp_fn`): runs `main`.

    NOTE(review): `main` is not bound under that name in this fragment — the
    training entry point above carries a scrambled name; confirm against the
    original script.
    """
    main()
if __name__ == "__main__":
    # NOTE(review): `main` is not bound under that name in this fragment —
    # the training entry point above carries a scrambled name; confirm.
    main()
| 480
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class __A ( unittest.TestCase ):
    """CPU smoke tests that launch accelerate's bundled test scripts in-process.

    Bug fix: both methods shared one scrambled name, so the second definition
    silently shadowed the first and only one test could ever run.
    """

    def test_test_script(self :Tuple ):
        """`test_script.main` runs cleanly under `debug_launcher`."""
        debug_launcher(test_script.main )

    def test_test_ops(self :Dict ):
        """`test_ops.main` runs cleanly under `debug_launcher`."""
        debug_launcher(test_ops.main )
| 21
| 0
|
from __future__ import annotations
def lowerCAmelCase__ ( a_list: list , item: object ) -> bool:
    """Recursive binary search.

    Returns True iff `item` occurs in the sorted sequence `a_list`;
    an empty sequence yields False.
    """
    # Bug fixes: the original reused one scrambled name for both parameters
    # (a SyntaxError) and recursed through the undefined name `binary_search`.
    if len(a_list ) == 0:
        return False
    midpoint = len(a_list ) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return lowerCAmelCase__(a_list[:midpoint] , item )
    return lowerCAmelCase__(a_list[midpoint + 1 :] , item )


# Conventional alias; the demo block below calls the function by this name.
binary_search = lowerCAmelCase__
if __name__ == "__main__":
    # Bug fixes: every local was bound to one scrambled name while the code
    # below read `sequence`/`target`/`not_str`, and the search function is
    # bound to a scrambled name in this fragment.
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = '' if lowerCAmelCase__(sequence, target) else 'not '
    print(f"""{target} was {not_str}found in {sequence}""")
| 702
|
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """2D max pooling over a square matrix.

    Parameters
    ----------
    arr : array-like, square (n, n)
    size : pooling-window edge length
    stride : window step in both directions

    Returns the pooled matrix of shape ((n - size) // stride + 1,) * 2.
    Raises ValueError if `arr` is not square.
    """
    # Bug fixes: the original reused one scrambled name for all three
    # parameters (a SyntaxError) and collapsed i/j/mat_i/mat_j into a single
    # rebound local, so the window indices never advanced correctly.
    mat = np.array(arr)
    if mat.shape[0] != mat.shape[1]:
        raise ValueError('The input array is not a square matrix')
    # compute the shape of the output matrix
    out_dim = (mat.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros
    pooled = np.zeros((out_dim, out_dim))
    i = mat_i = 0
    while i + size <= mat.shape[0]:
        j = mat_j = 0
        while j + size <= mat.shape[1]:
            # maximum of the current pooling window
            pooled[mat_i][mat_j] = np.max(mat[i : i + size, j : j + size])
            # shift the window by `stride` columns
            j += stride
            mat_j += 1
        # shift the window by `stride` rows
        i += stride
        mat_i += 1
    return pooled


# Preserve the original (scrambled) binding for backward compatibility.
lowerCAmelCase__ = maxpooling
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """2D average pooling over a square matrix (window means truncated to int).

    Parameters
    ----------
    arr : array-like, square (n, n)
    size : pooling-window edge length
    stride : window step in both directions

    Returns the pooled matrix of shape ((n - size) // stride + 1,) * 2.
    Raises ValueError if `arr` is not square.
    """
    # Bug fixes: the original reused one scrambled name for all three
    # parameters (a SyntaxError) and collapsed i/j/mat_i/mat_j into a single
    # rebound local, so the window indices never advanced correctly.
    mat = np.array(arr)
    if mat.shape[0] != mat.shape[1]:
        raise ValueError('The input array is not a square matrix')
    # compute the shape of the output matrix
    out_dim = (mat.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros
    pooled = np.zeros((out_dim, out_dim))
    i = mat_i = 0
    while i + size <= mat.shape[0]:
        j = mat_j = 0
        while j + size <= mat.shape[1]:
            # truncated mean of the current window (int() matches the original)
            pooled[mat_i][mat_j] = int(np.average(mat[i : i + size, j : j + size]))
            # shift the window by `stride` columns
            j += stride
            mat_j += 1
        # shift the window by `stride` rows
        i += stride
        mat_i += 1
    return pooled


# Preserve the original (scrambled) binding for backward compatibility.
lowerCAmelCase__ = avgpooling
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)
    # Loading the image (bug fix: the opened image was bound to a scrambled
    # name while the calls below read `image`).
    image = Image.open("path_to_image")
    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 104
| 0
|
from __future__ import annotations
from collections.abc import Callable
def SCREAMING_SNAKE_CASE_(fnc, x_start, x_end, steps=100):
    """Approximate the area between `fnc`, the x axis and [x_start, x_end]
    using the trapezoidal rule.

    Parameters: `fnc` integrand, interval bounds, number of trapezoids.
    Returns the accumulated (absolute-valued) area as a float.
    """
    # Bug fixes: the original reused one scrambled name for every parameter
    # (a SyntaxError) and collapsed x1/x2 and fx1/fx2 into single locals.
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


# Conventional alias used by the demo loop below.
trapezoidal_area = SCREAMING_SNAKE_CASE_
if __name__ == "__main__":

    def f(x):
        """Demo integrand: f(x) = x^3 + x^2."""
        # Bug fix: the original gave this inner function the same scrambled
        # name as the integrator, shadowing it, while the loop read `f`.
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    # Bug fix: the counter was assigned to one scrambled name and read
    # through another; use a single variable.
    i = 1_0
    while i <= 1_0_0_0_0_0:
        print(F'''with {i} steps: {SCREAMING_SNAKE_CASE_(f, -5, 5, i)}''')
        i *= 1_0
| 285
|
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __snake_case :
    """Translation feature: a fixed set of languages mapped to string values.

    NOTE(review): attribute names are scrambled in this fragment (every field
    is bound to the same name, so only the last binding survives) and `_a` is
    undefined here — confirm against the original `datasets` Translation
    feature before reuse.
    """

    # originally: languages (the `42` placeholder replaced a type annotation)
    lowerCAmelCase__ = 42
    lowerCAmelCase__ = None
    # Automatically constructed
    lowerCAmelCase__ = "dict"
    lowerCAmelCase__ = None
    lowerCAmelCase__ = field(default="Translation" , init=_a , repr=_a )

    def __call__( self : Tuple ) -> Optional[int]:
        """Arrow type: a struct with one string field per (sorted) language."""
        return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )

    def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten into one string `Value` per (sorted) language."""
        from .features import Value

        return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class __snake_case :
    """TranslationVariableLanguages feature: per-example variable language sets
    encoded as parallel `language`/`translation` lists.

    NOTE(review): names are scrambled in this fragment — three methods share
    one name (only the last binding survives), fields share one name, and
    `_a` is undefined; confirm against the original `datasets` feature.
    """

    lowerCAmelCase__ = None
    lowerCAmelCase__ = None
    lowerCAmelCase__ = None
    # Automatically constructed
    lowerCAmelCase__ = "dict"
    lowerCAmelCase__ = None
    lowerCAmelCase__ = field(default="TranslationVariableLanguages" , init=_a , repr=_a )

    def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
        """Originally `__post_init__`: dedupe/sort languages and cache count."""
        _lowerCAmelCase : Union[str, Any] = sorted(set(self.languages ) ) if self.languages else None
        _lowerCAmelCase : Optional[int] = len(self.languages ) if self.languages else None

    def __call__( self : Optional[int] ) -> Optional[int]:
        """Arrow type: parallel lists of language codes and translations."""
        return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , _UpperCAmelCase : List[str] ) -> List[Any]:
        """Originally `encode_example`: normalize a {lang: text-or-texts} dict
        into sorted parallel lists; rejects languages outside the valid set."""
        _lowerCAmelCase : Optional[int] = set(self.languages )
        if self.languages and set(_UpperCAmelCase ) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({', '.join(_UpperCAmelCase )})." )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        _lowerCAmelCase : Dict = []
        for lang, text in translation_dict.items():
            if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )
        # Ensure translations are in ascending order by language code.
        _lowerCAmelCase , _lowerCAmelCase : int = zip(*sorted(_UpperCAmelCase ) )
        return {"language": languages, "translation": translations}

    def SCREAMING_SNAKE_CASE ( self : str ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Originally `flatten`: sequences of language codes and translations."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("""string""" ) ),
            "translation": Sequence(Value("""string""" ) ),
        }
| 429
| 0
|
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
# Module logger (bound to a scrambled name in this fragment).
__snake_case : Optional[int] = datasets.utils.logging.get_logger(__name__)


class lowerCamelCase ( folder_based_builder.FolderBasedBuilderConfig ):
    """BuilderConfig for the AudioFolder dataset builder.

    NOTE(review): both fields are bound to the same scrambled name — in the
    original these are `drop_labels` and `drop_metadata`; confirm.
    """

    __snake_case = None
    __snake_case = None
class lowerCamelCase ( folder_based_builder.FolderBasedBuilder ):
    """Dataset builder for folders of audio files (AudioFolder).

    NOTE(review): all class attributes share one scrambled name, so only the
    last binding survives; confirm the original attribute names
    (base feature, column name, config class, extensions, task template)
    against the upstream builder before reuse.
    """

    __snake_case = datasets.Audio()
    __snake_case = 'audio'
    __snake_case = AudioFolderConfig
    __snake_case = 42  # definition at the bottom of the script
    __snake_case = AudioClassification(audio_column='audio' , label_column='label' )
# Audio file extensions accepted by the builder (soundfile-readable formats
# plus mp3/opus).
__snake_case : int = [
    '.aiff',
    '.au',
    '.avr',
    '.caf',
    '.flac',
    '.htk',
    '.svx',
    '.mat4',
    '.mat5',
    '.mpc2k',
    '.ogg',
    '.paf',
    '.pvf',
    '.raw',
    '.rf64',
    '.sd2',
    '.sds',
    '.ircam',
    '.voc',
    '.w64',
    '.wav',
    '.nist',
    '.wavex',
    '.wve',
    '.xi',
    '.mp3',
    '.opus',
]
# NOTE(review): `AUDIO_EXTENSIONS` is not defined in this fragment — the list
# above is presumably it under a scrambled name; confirm.
__snake_case : List[str] = AUDIO_EXTENSIONS
| 687
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
# Flag consumed by the test machinery in the original test file.
__snake_case : bool = False


class lowerCamelCase ( unittest.TestCase ):
    """Placeholder for the fast pipeline tests (no fast tests implemented)."""

    pass
@nightly
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
    """Nightly GPU integration tests for `VersatileDiffusionTextToImagePipeline`.

    Bug fix: the three methods below shared one scrambled name, so only the
    last definition survived; they are restored to distinct names (the first
    is clearly `tearDown` — it calls `super().tearDown()`). Scrambled local
    references are rebound to real locals (`pipe`, `prompt`, `generator`).
    """

    def tearDown(self ) -> Any:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self ) -> Union[str, Any]:
        """Saving and reloading the trimmed pipeline reproduces the same images."""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device )
        # NOTE(review): the scrambled source passed an undefined name here;
        # disable=None (the tqdm default) is the conventional value — confirm.
        pipe.set_progress_bar_config(disable=None )

        prompt = """A painting of a squirrel eating a burger """
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        generator = generator.manual_seed(0 )
        new_image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" ).images

        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"

    def test_text_to_image(self ) -> int:
        """Full 50-step generation matches a reference slice."""
        # NOTE(review): the scrambled source read `torch.floataa`; float16 is
        # the conventional dtype for this checkpoint — confirm.
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            """shi-labs/versatile-diffusion""" , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        prompt = """A painting of a squirrel eating a burger """
        generator = torch.manual_seed(0 )
        image = pipe(
            prompt=prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images

        image_slice = image[0, 2_53:2_56, 2_53:2_56, -1]
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 687
| 1
|
import math
from datetime import datetime, timedelta
def _A ( __snake_case :int ) -> datetime:
    """Gregorian Easter Sunday for the given year, after Gauss' computation.

    Returns a `datetime` at midnight of Easter Sunday.
    """
    # Keep the public signature; bind the readable name the body reads
    # (bug fix: the scrambled original assigned every value to one name and
    # read back `year`/`metonic_cycle`/... which were never bound).
    year = __snake_case
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100 )
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25 )
    # NOTE(review): true division keeps this a float; Gauss' formula is
    # usually written with integer division — behavior preserved as-is,
    # confirm intended results for pre-2000 centuries.
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 19 )
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year , 4 , 18 )
    else:
        return datetime(year , 3 , 22 ) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday ) )


# Conventional alias used by the demo loop below.
gauss_easter = _A
if __name__ == "__main__":
    for year in (19_94, 20_00, 20_10, 20_21, 20_23):
        # Bug fixes: the tense string was bound to a scrambled name while the
        # print read `tense`, and the Easter function is bound to a scrambled
        # name in this fragment.
        tense = 'will be' if year > datetime.now().year else 'was'
        print(F"""Easter in {year} {tense} {_A(year)}""")
| 693
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Default batch sizes for the MRPC example (train / eval).
lowercase_ = 16
lowercase_ = 32
def UpperCAmelCase ( _lowercase : Accelerator , _lowercase : int = 1_6 , _lowercase : str = "bert-base-cased" ) -> Dict:
    """Build tokenized GLUE/MRPC train and eval dataloaders.

    NOTE(review): the three parameters share one scrambled name (a
    SyntaxError) — originally (accelerator, batch_size, model_name_or_path);
    locals are assigned to `lowerCAmelCase_` but read back under their
    original names (`tokenizer`, `datasets`, `tokenized_datasets`, ...).
    Confirm against the original accelerate example before running.
    """
    lowerCAmelCase_ = AutoTokenizer.from_pretrained(_lowercase )
    lowerCAmelCase_ = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(_lowercase : Optional[int] ):
        # max_length=None => use the model max length (it's actually the default)
        lowerCAmelCase_ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowercase , max_length=_lowercase )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    lowerCAmelCase_ = datasets.map(
        _lowercase , batched=_lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=_lowercase )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    lowerCAmelCase_ = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(_lowercase : Any ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(_lowercase , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
        return tokenizer.pad(_lowercase , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    lowerCAmelCase_ = DataLoader(
        tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
    lowerCAmelCase_ = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
    return train_dataloader, eval_dataloader
def UpperCAmelCase ( _lowercase : int , _lowercase : Any , _lowercase : Any , _lowercase : str ) -> List[Any]:
    """Run one evaluation pass and return the accuracy.

    NOTE(review): the parameters share one scrambled name (a SyntaxError) —
    originally (model, eval_dataloader, metric, accelerator); locals are
    assigned to `lowerCAmelCase_` but read back under their original names.
    Confirm against the original accelerate example before running.
    """
    model.eval()
    lowerCAmelCase_ = 0
    for step, batch in enumerate(_lowercase ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            lowerCAmelCase_ = model(**_lowercase )
        lowerCAmelCase_ = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        lowerCAmelCase_ , lowerCAmelCase_ = accelerator.gather(
            (predictions, batch['''labels''']) )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(_lowercase ) - 1:
                # drop the duplicated tail samples of the final gathered batch
                lowerCAmelCase_ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                lowerCAmelCase_ = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=_lowercase , references=_lowercase , )
    lowerCAmelCase_ = metric.compute()
    return eval_metric["accuracy"]
def UpperCAmelCase ( _lowercase : Dict , _lowercase : Optional[Any] ) -> Dict:
    """Train/evaluate MRPC with optional DeepSpeed and checkpoint resume.

    NOTE(review): both parameters share one scrambled name (a SyntaxError) —
    originally (config, args); locals are assigned to `lowerCAmelCase_` but
    read back under their original names (`accelerator`, `model`,
    `optimizer`, `lr_scheduler`, ...). Confirm against the original
    accelerate checkpointing example before running.
    """
    lowerCAmelCase_ = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lowerCAmelCase_ = config['''lr''']
    lowerCAmelCase_ = int(config['''num_epochs'''] )
    lowerCAmelCase_ = int(config['''seed'''] )
    lowerCAmelCase_ = int(config['''batch_size'''] )
    lowerCAmelCase_ = args.model_name_or_path
    set_seed(_lowercase )
    lowerCAmelCase_ , lowerCAmelCase_ = get_dataloaders(_lowercase , _lowercase , _lowercase )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    lowerCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase )
    # Instantiate optimizer
    lowerCAmelCase_ = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    lowerCAmelCase_ = optimizer_cls(params=model.parameters() , lr=_lowercase )
    if accelerator.state.deepspeed_plugin is not None:
        lowerCAmelCase_ = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        lowerCAmelCase_ = 1
    lowerCAmelCase_ = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lowerCAmelCase_ = get_linear_schedule_with_warmup(
            optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
    else:
        lowerCAmelCase_ = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = accelerator.prepare(
        _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
    # We need to keep track of how many total steps we have iterated over
    lowerCAmelCase_ = 0
    # We also need to keep track of the stating epoch so files are named properly
    lowerCAmelCase_ = 0
    lowerCAmelCase_ = evaluate.load('''glue''' , '''mrpc''' )
    lowerCAmelCase_ = num_epochs
    if args.partial_train_epoch is not None:
        lowerCAmelCase_ = args.partial_train_epoch
    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        lowerCAmelCase_ = args.resume_from_checkpoint.split('''epoch_''' )[1]
        lowerCAmelCase_ = ''''''
        # extract the leading digits of the checkpoint's epoch suffix
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        lowerCAmelCase_ = int(_lowercase ) + 1
        lowerCAmelCase_ = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase )
        accelerator.print('''resumed checkpoint performance:''' , _lowercase )
        accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
        with open(os.path.join(args.output_dir , F"""state_{starting_epoch-1}.json""" ) , '''r''' ) as f:
            lowerCAmelCase_ = json.load(_lowercase )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    lowerCAmelCase_ = {}
    for epoch in range(_lowercase , _lowercase ):
        model.train()
        for step, batch in enumerate(_lowercase ):
            lowerCAmelCase_ = model(**_lowercase )
            lowerCAmelCase_ = outputs.loss
            lowerCAmelCase_ = loss / gradient_accumulation_steps
            accelerator.backward(_lowercase )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                overall_step += 1
        lowerCAmelCase_ = F"""epoch_{epoch}"""
        lowerCAmelCase_ = os.path.join(args.output_dir , _lowercase )
        accelerator.save_state(_lowercase )
        lowerCAmelCase_ = evaluation_loop(_lowercase , _lowercase , _lowercase , _lowercase )
        lowerCAmelCase_ = accuracy
        lowerCAmelCase_ = lr_scheduler.get_lr()[0]
        lowerCAmelCase_ = optimizer.param_groups[0]['''lr''']
        lowerCAmelCase_ = epoch
        lowerCAmelCase_ = overall_step
        accelerator.print(F"""epoch {epoch}:""" , _lowercase )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , F"""state_{epoch}.json""" ) , '''w''' ) as f:
                json.dump(_lowercase , _lowercase )
def UpperCAmelCase ( ) -> Optional[Any]:
    """Parse CLI arguments and launch the training function.

    NOTE(review): names are scrambled — `parser`, `args` and
    `training_function` are not bound under those names in this fragment;
    confirm against the original accelerate example before running.
    """
    lowerCAmelCase_ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''' , type=_lowercase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_lowercase , )
    parser.add_argument(
        '''--output_dir''' , type=_lowercase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--resume_from_checkpoint''' , type=_lowercase , default=_lowercase , help='''If the training should continue from a checkpoint folder.''' , )
    parser.add_argument(
        '''--partial_train_epoch''' , type=_lowercase , default=_lowercase , help='''If passed, the training will stop after this number of epochs.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=_lowercase , default=2 , help='''Number of train epochs.''' , )
    lowerCAmelCase_ = parser.parse_args()
    lowerCAmelCase_ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 4_2, '''batch_size''': 1_6}
    training_function(_lowercase , _lowercase )
if __name__ == "__main__":
    # NOTE(review): `main` is bound to a scrambled name above — confirm.
    main()
| 552
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : List[Any] = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[int] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 721
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class snake_case_ ( unittest.TestCase ):
    """Holds the configuration used to build PoolFormer image processors in tests.

    Fixes: the previous ``__init__`` declared every parameter as ``__a``
    (duplicate argument names — a SyntaxError) and discarded every value
    into the throwaway name ``A__`` instead of storing it on ``self``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        """Store the processor configuration; ``None`` defaults expand to the upstream values."""
        # Avoid mutable default arguments: expand None to the canonical defaults.
        self.size = size if size is not None else {'shortest_edge': 30}
        self.crop_size = crop_size if crop_size is not None else {'height': 30, 'width': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.crop_pct = crop_pct
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def _UpperCAmelCase ( self ):
        """Return the kwargs dict used to instantiate a PoolFormer image processor."""
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class snake_case_ ( _lowerCamelCase , unittest.TestCase ):
    """Unit tests for ``PoolFormerImageProcessor``.

    NOTE(review): this class looks mechanically renamed from a working
    original and is broken as written — confirm before use:
    * the first base ``_lowerCamelCase`` is undefined in this module
      (presumably the saving-test mixin imported above);
    * ``PoolFormerImageProcessingTester`` is undefined (the tester class in
      this module is named ``snake_case_``);
    * every method is named ``_UpperCAmelCase``, so later definitions shadow
      earlier ones in the class namespace;
    * results are assigned to the throwaway ``A__`` while later lines read
      ``image_processing``, ``image_inputs``, ``encoded_images`` and
      ``self.image_processor_tester``, which are never bound;
    * ``hasattr(__a , ...)`` inside the class body is name-mangled to
      ``_snake_case___a``.
    """

    # Image-processor class under test; None when vision deps are missing.
    SCREAMING_SNAKE_CASE_: List[Any] = PoolFormerImageProcessor if is_vision_available() else None

    def _UpperCAmelCase ( self ):
        """setUp: build the tester holding the processor configuration."""
        A__ = PoolFormerImageProcessingTester(self )

    @property
    def _UpperCAmelCase ( self ):
        """Kwargs dict used to instantiate the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def _UpperCAmelCase ( self ):
        """The processor exposes all configuration attributes."""
        A__ = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__a , 'do_resize_and_center_crop' ) )
        self.assertTrue(hasattr(__a , 'size' ) )
        self.assertTrue(hasattr(__a , 'crop_pct' ) )
        self.assertTrue(hasattr(__a , 'do_normalize' ) )
        self.assertTrue(hasattr(__a , 'image_mean' ) )
        self.assertTrue(hasattr(__a , 'image_std' ) )

    def _UpperCAmelCase ( self ):
        """``from_dict`` honours defaults and explicit size/crop_size overrides."""
        A__ = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 30} )
        self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
        A__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )

    def _UpperCAmelCase ( self ):
        """Intentionally empty placeholder (shadowed/no-op)."""
        pass

    def _UpperCAmelCase ( self ):
        """Processing PIL images yields (1, C, H, W) / (B, C, H, W) tensors."""
        A__ = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
        for image in image_inputs:
            self.assertIsInstance(__a , Image.Image )
        # Test not batched input
        A__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        A__ = image_processing(__a , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def _UpperCAmelCase ( self ):
        """Same shape checks for numpy array inputs."""
        A__ = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
        for image in image_inputs:
            self.assertIsInstance(__a , np.ndarray )
        # Test not batched input
        A__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        A__ = image_processing(__a , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def _UpperCAmelCase ( self ):
        """Same shape checks for torch tensor inputs."""
        A__ = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
        for image in image_inputs:
            self.assertIsInstance(__a , torch.Tensor )
        # Test not batched input
        A__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        A__ = image_processing(__a , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
| 554
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__)
def A_ ( config , base_model=False ):
    """Build (old_name, new_name) pairs mapping timm ViT weights to HF ViT weights.

    Args:
        config: model config providing ``num_hidden_layers``.
        base_model: when True, target names are for a bare ``ViTModel``
            (the ``vit.`` prefix is stripped and pooler/layernorm keys added).

    Returns:
        list[tuple[str, str]]: rename pairs for the state dict.

    Fixes: the previous version declared both parameters with the same name
    (a SyntaxError), never bound ``rename_keys``, and discarded the
    prefix-stripped list in the ``base_model`` branch.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
        rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
        rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
        rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
        rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
        rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
        rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
        rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
        rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
        rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ] )
    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ] )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    return rename_keys
def A_ ( state_dict , config , base_model=False ):
    """Split each timm fused qkv projection into separate HF query/key/value tensors.

    Args:
        state_dict: mutable state dict; ``blocks.{i}.attn.qkv.*`` entries are
            popped and replaced in place by per-projection entries.
        config: model config providing ``num_hidden_layers`` and ``hidden_size``.
        base_model: when True, target keys have no ``vit.`` prefix.

    Fixes: the previous version declared duplicate parameter names (a
    SyntaxError) and discarded every q/k/v slice into a throwaway local, so
    the state dict was never updated.
    """
    for i in range(config.num_hidden_layers ):
        prefix = "" if base_model else "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def A_ ( state_dict ):
    """Drop timm's classification head from ``state_dict`` in place.

    Fixes: the previous version bound the ignore list to a throwaway name and
    then read the undefined names ``ignore_keys``/``state_dict``, and called
    ``pop`` with undefined arguments.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        # Default of None so already-missing keys are silently ignored.
        state_dict.pop(k , None )
def A_ ( dct , old , new ):
    """Rename key ``old`` to ``new`` in ``dct``, in place.

    Fixes: the previous version declared three parameters with the same name
    (a SyntaxError) and discarded the popped value instead of reinserting it.
    """
    val = dct.pop(old )
    dct[new] = val
def A_ ( ):
    """Download the standard COCO sanity-check image (two cats on a couch).

    Returns a PIL image. Fixes: the previous version passed the undefined
    name ``_lowerCAmelCase`` as both the URL and the ``stream`` flag.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def A_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ):
    """Convert a timm ViT/DeiT checkpoint to the HF ViT format and save it.

    NOTE(review): this function appears mechanically renamed from a working
    original and is broken as written — confirm against upstream before use:
    * both parameters share the name ``_lowerCAmelCase`` (a SyntaxError);
    * every intermediate is bound to ``_lowerCamelCase`` while later lines
      read unbound names (``vit_name``, ``config``, ``base_model``,
      ``idalabel``, ``timm_model``, ``state_dict``, ``rename_keys``,
      ``model``, ``image_processor``, ``encoding``, ``outputs`` …), so the
      config fields, id2label maps and architecture sizes are all discarded.
    """
    _lowerCamelCase : str = ViTConfig()
    _lowerCamelCase : List[str] = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        _lowerCamelCase : Optional[Any] = True
        # presumably config.patch_size / config.image_size parsed from the name — TODO confirm
        _lowerCamelCase : Optional[Any] = int(vit_name[-12:-10] )
        _lowerCamelCase : str = int(vit_name[-9:-6] )
    else:
        _lowerCamelCase : List[Any] = 1000
        _lowerCamelCase : str = "huggingface/label-files"
        _lowerCamelCase : Any = "imagenet-1k-id2label.json"
        _lowerCamelCase : int = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
        # NOTE(review): the dict comprehension keys on the undefined
        # ``_lowerCAmelCase`` — upstream this is ``int(k)``.
        _lowerCamelCase : str = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
        _lowerCamelCase : Optional[Any] = idalabel
        _lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()}
        _lowerCamelCase : List[str] = int(vit_name[-6:-4] )
        _lowerCamelCase : str = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny" ):
            # hidden_size / intermediate_size / num_hidden_layers / num_attention_heads — TODO confirm
            _lowerCamelCase : List[Any] = 192
            _lowerCamelCase : Optional[int] = 768
            _lowerCamelCase : Union[str, Any] = 12
            _lowerCamelCase : Optional[Any] = 3
        elif vit_name[9:].startswith("small" ):
            _lowerCamelCase : Optional[Any] = 384
            _lowerCamelCase : Optional[Any] = 1536
            _lowerCamelCase : int = 12
            _lowerCamelCase : List[str] = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small" ):
            _lowerCamelCase : List[str] = 768
            _lowerCamelCase : Optional[Any] = 2304
            _lowerCamelCase : List[Any] = 8
            _lowerCamelCase : List[Any] = 8
        elif vit_name[4:].startswith("base" ):
            # base config matches ViTConfig defaults, nothing to override
            pass
        elif vit_name[4:].startswith("large" ):
            _lowerCamelCase : List[Any] = 1024
            _lowerCamelCase : Optional[Any] = 4096
            _lowerCamelCase : List[Any] = 24
            _lowerCamelCase : Union[str, Any] = 16
        elif vit_name[4:].startswith("huge" ):
            _lowerCamelCase : str = 1280
            _lowerCamelCase : List[Any] = 5120
            _lowerCamelCase : List[str] = 32
            _lowerCamelCase : List[str] = 16
    # load original model from timm
    _lowerCamelCase : int = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    _lowerCamelCase : Any = timm_model.state_dict()
    if base_model:
        remove_classification_head_(_lowerCAmelCase )
    _lowerCamelCase : Optional[int] = create_rename_keys(_lowerCAmelCase , _lowerCAmelCase )
    for src, dest in rename_keys:
        rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        _lowerCamelCase : int = ViTModel(_lowerCAmelCase ).eval()
    else:
        _lowerCamelCase : List[str] = ViTForImageClassification(_lowerCAmelCase ).eval()
    model.load_state_dict(_lowerCAmelCase )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        _lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=config.image_size )
    else:
        _lowerCamelCase : Union[str, Any] = ViTImageProcessor(size=config.image_size )
    _lowerCamelCase : Optional[int] = image_processor(images=prepare_img() , return_tensors="pt" )
    _lowerCamelCase : Optional[int] = encoding["pixel_values"]
    _lowerCamelCase : Union[str, Any] = model(_lowerCAmelCase )
    if base_model:
        # Bare model: compare pooled outputs against timm's forward_features.
        _lowerCamelCase : int = timm_model.forward_features(_lowerCAmelCase )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(_lowerCAmelCase , outputs.pooler_output , atol=1E-3 )
    else:
        # Classifier: compare logits against the timm model.
        _lowerCamelCase : Union[str, Any] = timm_model(_lowerCAmelCase )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1E-3 )
    Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
    print(F'Saving model {vit_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(_lowerCAmelCase )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 44
|
"""Lazy-import ``__init__`` for the Blenderbot Small model family.

Registers the submodule -> public-name map (configuration, tokenizers, and
torch/TF/flax modeling code, each gated on its backend) and installs a
``_LazyModule`` proxy.

Fixes: ``_import_structure`` was never defined (the dict and each
backend-specific list were bound to throwaway ``lowercase`` names), and the
``_LazyModule`` instance was never installed into ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Map of submodule name -> public names it provides; consumed by _LazyModule.
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336
| 0
|
"""simple docstring"""
def lowerCAmelCase__ ( a : str , b : str ) -> bool:
    """Return True if ``b`` can be formed from ``a`` by uppercasing some of
    ``a``'s lowercase letters and deleting the remaining lowercase letters
    (uppercase letters in ``a`` must all be matched).

    Classic "abbreviation" dynamic programme: ``dp[i][j]`` is True when the
    first ``i`` characters of ``a`` can produce the first ``j`` of ``b``.

    Fixes: the previous version declared both parameters with the same name
    (a SyntaxError) and never bound ``n``, ``m`` or ``dp``.
    """
    n = len(a )
    m = len(b )
    dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
    dp[0][0] = True
    for i in range(n ):
        for j in range(m + 1 ):
            if dp[i][j]:
                # Uppercasing a[i] consumes one character of b.
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Lowercase characters may also simply be deleted.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104
|
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class lowerCAmelCase_ ( ABC ):
    """Abstract base for readers that build a dataset from files on disk.

    Subclasses implement ``snake_case`` (the read method) and receive the
    common loading options here.

    Fixes: the previous version inherited the undefined name
    ``lowerCAmelCase`` (``ABC`` is imported above), declared every parameter
    as ``lowerCAmelCase`` (duplicate argument names — a SyntaxError), and
    discarded every value into a local instead of storing it on ``self``.
    """

    def __init__(
        self,
        path_or_paths = None,
        split = None,
        features = None,
        cache_dir = None,
        keep_in_memory = False,
        streaming = False,
        num_proc = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        # A dict of paths already maps split names to files, so no default
        # split is applied in that case; otherwise fall back to "train".
        self.split = split if split or isinstance(path_or_paths, dict) else 'train'
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def snake_case ( self ):
        """Load and return the dataset (implemented by subclasses)."""
        pass
class lowerCAmelCase_ ( ABC ):
    """Abstract base for readers that build a dataset from an in-memory input stream.

    Mirrors the file-based reader above but takes no path/split arguments.

    Fixes: the previous version inherited the undefined name
    ``lowerCAmelCase``, declared duplicate ``lowerCAmelCase`` parameters
    (a SyntaxError), and discarded every value into a local instead of
    storing it on ``self``.
    """

    def __init__(
        self,
        features = None,
        cache_dir = None,
        keep_in_memory = False,
        streaming = False,
        num_proc = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def snake_case ( self ):
        """Load and return the dataset (implemented by subclasses)."""
        pass
| 104
| 1
|
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def _a (model ):
    """Derive encoder (Swin) and decoder (MBart) configs from an original Donut model.

    Fixes: the previous version bound both configs to a throwaway name while
    returning the unbound ``encoder_config``/``decoder_config``, and passed
    the model object itself where boolean flags belong.
    """
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
    decoder_config = MBartConfig(
        is_decoder=True , is_encoder_decoder=False , add_cross_attention=True , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
            model.decoder.tokenizer ) , scale_embedding=True , add_final_layer_norm=True , )
    return encoder_config, decoder_config
def _a (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if "encoder.model" in name:
_UpperCamelCase =name.replace('''encoder.model''' , '''encoder''' )
if "decoder.model" in name:
_UpperCamelCase =name.replace('''decoder.model''' , '''decoder''' )
if "patch_embed.proj" in name:
_UpperCamelCase =name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
_UpperCamelCase =name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if name.startswith('''encoder''' ):
if "layers" in name:
_UpperCamelCase ='''encoder.''' + name
if "attn.proj" in name:
_UpperCamelCase =name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name and "mask" not in name:
_UpperCamelCase =name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
_UpperCamelCase =name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_UpperCamelCase =name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
_UpperCamelCase =name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_UpperCamelCase =name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
_UpperCamelCase ='''encoder.layernorm.weight'''
if name == "encoder.norm.bias":
_UpperCamelCase ='''encoder.layernorm.bias'''
return name
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
_UpperCamelCase =orig_state_dict.pop(__SCREAMING_SNAKE_CASE )
if "qkv" in key:
_UpperCamelCase =key.split('''.''' )
_UpperCamelCase =int(key_split[3] )
_UpperCamelCase =int(key_split[5] )
_UpperCamelCase =model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_UpperCamelCase =val[:dim, :]
_UpperCamelCase =val[dim : dim * 2, :]
_UpperCamelCase =val[-dim:, :]
else:
_UpperCamelCase =val[:dim]
_UpperCamelCase =val[dim : dim * 2]
_UpperCamelCase =val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
_UpperCamelCase =val
return orig_state_dict
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False ):
    """Convert an original Donut checkpoint to the HF VisionEncoderDecoder format.

    NOTE(review): this function appears mechanically renamed from a working
    original and is broken as written — confirm against upstream before use:
    * all three parameters share the name ``__SCREAMING_SNAKE_CASE``
      (a SyntaxError);
    * every intermediate is bound to ``_UpperCamelCase`` while later lines
      read unbound names (``model``, ``original_model``, ``dataset``,
      ``image``, ``tokenizer``, ``processor``, ``pixel_values``,
      ``task_prompt``, ``model_name``, ``pytorch_dump_folder_path``,
      ``push_to_hub`` …).
    """
    _UpperCamelCase =DonutModel.from_pretrained(__SCREAMING_SNAKE_CASE ).eval()
    # load HuggingFace model
    _UpperCamelCase , _UpperCamelCase =get_configs(__SCREAMING_SNAKE_CASE )
    _UpperCamelCase =DonutSwinModel(__SCREAMING_SNAKE_CASE )
    _UpperCamelCase =MBartForCausalLM(__SCREAMING_SNAKE_CASE )
    _UpperCamelCase =VisionEncoderDecoderModel(encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
    model.eval()
    _UpperCamelCase =original_model.state_dict()
    _UpperCamelCase =convert_state_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    model.load_state_dict(__SCREAMING_SNAKE_CASE )
    # verify results on scanned document
    _UpperCamelCase =load_dataset('''hf-internal-testing/example-documents''' )
    _UpperCamelCase =dataset['''test'''][0]['''image'''].convert('''RGB''' )
    _UpperCamelCase =XLMRobertaTokenizerFast.from_pretrained(__SCREAMING_SNAKE_CASE , from_slow=__SCREAMING_SNAKE_CASE )
    _UpperCamelCase =DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
    _UpperCamelCase =DonutProcessor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
    _UpperCamelCase =processor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
    # Pick the task prompt expected by each fine-tuned variant.
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        _UpperCamelCase ='''<s_docvqa><s_question>{user_input}</s_question><s_answer>'''
        _UpperCamelCase ='''When is the coffee break?'''
        _UpperCamelCase =task_prompt.replace('''{user_input}''' , __SCREAMING_SNAKE_CASE )
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        _UpperCamelCase ='''<s_rvlcdip>'''
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        _UpperCamelCase ='''<s_cord>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        _UpperCamelCase ='''s_cord-v2>'''
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        _UpperCamelCase ='''<s_zhtrainticket>'''
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        _UpperCamelCase ='''hello world'''
    else:
        raise ValueError('''Model name not supported''' )
    _UpperCamelCase =original_model.decoder.tokenizer(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' )[
        '''input_ids'''
    ]
    # Compare patch embeddings, encoder hidden states and decoder logits
    # between the original and converted models.
    _UpperCamelCase =original_model.encoder.model.patch_embed(__SCREAMING_SNAKE_CASE )
    _UpperCamelCase , _UpperCamelCase =model.encoder.embeddings(__SCREAMING_SNAKE_CASE )
    assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 )
    # verify encoder hidden states
    _UpperCamelCase =original_model.encoder(__SCREAMING_SNAKE_CASE )
    _UpperCamelCase =model.encoder(__SCREAMING_SNAKE_CASE ).last_hidden_state
    assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-2 )
    # verify decoder hidden states
    _UpperCamelCase =original_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).logits
    _UpperCamelCase =model(__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE ).logits
    assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(__SCREAMING_SNAKE_CASE )
        processor.save_pretrained(__SCREAMING_SNAKE_CASE )
    if push_to_hub:
        model.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
        processor.push_to_hub('''nielsr/''' + model_name.split('''/''' )[-1] , commit_message='''Update model''' )
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
__lowerCamelCase : Any = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 404
|
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( lowercase_):
"""simple docstring"""
lowerCAmelCase_ = (UnCLIPScheduler,)
def UpperCamelCase__ ( self : Union[str, Any] , **UpperCamelCase__ : Dict ) -> Optional[Any]:
_UpperCamelCase ={
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**UpperCamelCase__ )
return config
def UpperCamelCase__ ( self : str ) -> Tuple:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def UpperCamelCase__ ( self : List[str] ) -> int:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=UpperCamelCase__ )
def UpperCamelCase__ ( self : List[str] ) -> List[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase__ )
def UpperCamelCase__ ( self : List[Any] ) -> List[str]:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=UpperCamelCase__ )
def UpperCamelCase__ ( self : List[Any] ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def UpperCamelCase__ ( self : Optional[Any] ) -> Tuple:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=UpperCamelCase__ , prev_timestep=UpperCamelCase__ )
def UpperCamelCase__ ( self : List[str] ) -> Dict:
_UpperCamelCase =self.scheduler_classes[0]
_UpperCamelCase =self.get_scheduler_config(variance_type='''fixed_small_log''' )
_UpperCamelCase =scheduler_class(**UpperCamelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1E-5
def UpperCamelCase__ ( self : Any ) -> Tuple:
_UpperCamelCase =self.scheduler_classes[0]
_UpperCamelCase =self.get_scheduler_config(variance_type='''learned_range''' )
_UpperCamelCase =scheduler_class(**UpperCamelCase__ )
_UpperCamelCase =0.5
assert scheduler._get_variance(1 , predicted_variance=UpperCamelCase__ ) - -10.1712790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=UpperCamelCase__ ) - -5.7998052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=UpperCamelCase__ ) - -0.0010011 < 1E-5
def UpperCamelCase__ ( self : str ) -> Optional[int]:
_UpperCamelCase =self.scheduler_classes[0]
_UpperCamelCase =self.get_scheduler_config()
_UpperCamelCase =scheduler_class(**UpperCamelCase__ )
_UpperCamelCase =scheduler.timesteps
_UpperCamelCase =self.dummy_model()
_UpperCamelCase =self.dummy_sample_deter
_UpperCamelCase =torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase__ ):
# 1. predict noise residual
_UpperCamelCase =model(UpperCamelCase__ , UpperCamelCase__ )
# 2. predict previous mean of sample x_t-1
_UpperCamelCase =scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
_UpperCamelCase =pred_prev_sample
_UpperCamelCase =torch.sum(torch.abs(UpperCamelCase__ ) )
_UpperCamelCase =torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 252.2682495 ) < 1E-2
assert abs(result_mean.item() - 0.3284743 ) < 1E-3
def test_full_loop_skip_timesteps(self):
    """Denoise with 25 inference steps, passing ``prev_timestep`` explicitly.

    Locals restored and method renamed (the original shadowed its siblings and
    referenced undefined names such as ``timesteps`` and ``sample``).
    """
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)
    scheduler.set_timesteps(25)
    timesteps = scheduler.timesteps
    model = self.dummy_model()
    sample = self.dummy_sample_deter
    generator = torch.manual_seed(0)
    for i, t in enumerate(timesteps):
        # 1. predict noise residual
        residual = model(sample, t)
        # The last step has no successor timestep.
        if i + 1 == timesteps.shape[0]:
            prev_timestep = None
        else:
            prev_timestep = timesteps[i + 1]
        # 2. predict previous mean of sample x_t-1
        pred_prev_sample = scheduler.step(
            residual, t, sample, prev_timestep=prev_timestep, generator=generator
        ).prev_sample
        sample = pred_prev_sample
    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_sum.item() - 258.2044983) < 1e-2
    assert abs(result_mean.item() - 0.3362038) < 1e-3
def test_trained_betas(self):
    # Intentional no-op override of a common scheduler test that does not apply
    # here. Renamed from a duplicate ``UpperCamelCase__`` definition.
    pass
def test_time_indices(self):
    # Intentional no-op override of a common scheduler test that does not apply
    # here. Renamed from a duplicate ``UpperCamelCase__`` definition.
    pass
| 404
| 1
|
class _SCREAMING_SNAKE_CASE:
    """Directed graph stored as an adjacency list, with a recursive DFS.

    Fixes: ``__init__``/``add_edge`` bound values to a throwaway local instead of
    ``self.vertex``; all methods shared the name ``_snake_case`` (shadowing); the
    DFS helpers were called with undefined arguments.
    """

    def __init__(self):
        # vertex -> list of adjacent vertices
        self.vertex = {}

    def print_graph(self):
        """Print the raw adjacency dict, then each vertex with its neighbours."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        """Add a directed edge ``from_vertex -> to_vertex``."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        """Run DFS from every unvisited vertex (assumes vertices are 0..n-1)."""
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)


# Public alias: the demo script below constructs the graph via ``Graph``.
Graph = _SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Demo: build a small directed graph, print it, then run DFS.
    # Fix: the instance was bound to ``__A`` while the calls used undefined ``g``.
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print("DFS:")
    g.dfs()
    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
| 698
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Tests for ``TextStreamer`` / ``TextIteratorStreamer`` against greedy decoding.

    Fixes: every local was bound to ``SCREAMING_SNAKE_CASE`` while the bodies
    read undefined names (``tokenizer``, ``model``, ``greedy_ids`` …), and all
    five tests shared the method name ``_snake_case`` (only the last survived).
    """

    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        # Disable EOS so generation always produces max_new_tokens tokens.
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        # Only the newly generated tokens should be streamed when skipping the prompt.
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 698
| 1
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Stand-in so ``Image``-typed references don't fail when PIL is absent.

        Fixes the original stub, which was uselessly named ``UpperCamelCase__``
        with a method whose ``*args``/``**kwargs`` shared one name (SyntaxError).
        """

        @staticmethod
        def open(*args, **kwargs):
            pass
def _lowerCamelCase(image) -> str:
    """Return the MD5 hex digest of an image's raw bytes (stable test fingerprint).

    Fixes: ``hashlib.mda`` is not a real function (MD5 typo) and the digest
    object was discarded while the undefined name ``m`` was used.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


# The pipeline tests below call this helper as ``hashimage``.
hashimage = _lowerCamelCase
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCamelCase__(unittest.TestCase):
    """Pipeline tests for depth estimation.

    Fixes: results were bound to ``A__`` while undefined names were used; the
    methods all shared the name ``snake_case__`` (only the last survived); the
    hashed depth image was discarded instead of stored back into ``outputs``.
    """

    # Consumed by the shared pipeline-test harness to pick candidate models.
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        # Replace the PIL image with its hash so comparisons are stable.
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 104
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + e^-x).

    Renamed from a duplicate (and therefore unreachable) ``lowercase_``
    definition; its body also referenced the undefined name ``vector``.
    """
    return 1 / (1 + np.exp(-vector))


def lowercase_(vector: np.ndarray) -> np.ndarray:
    """Sigmoid Linear Unit (SiLU / swish): x * sigmoid(x), element-wise."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 299
| 0
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__snake_case : Any = logging.getLogger()
def _UpperCamelCase() -> str:
    """Parse the ``-f`` CLI flag and return its value.

    (pytest passes the test file via ``-f``; this mirrors the usual example-test
    helper.) Fixes the original, which bound the parsed namespace to a throwaway
    name and then read the undefined ``args``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class __SCREAMING_SNAKE_CASE(TestCasePlus):
    """End-to-end tests for the DeeBERT example script.

    Fixes: the base class referenced the undefined ``__lowercase`` (restored to
    the imported ``TestCasePlus``); locals were bound to throwaway names while
    undefined names were used; ``self.run_and_check`` had no matching method.
    """

    def setUp(self):
        # Mirror run logs to stdout so failures show the script output.
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        """Run ``run_glue_deebert.main()`` with ``args`` and require all metrics >= 0.666."""
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '\n            --model_type roberta\n            --model_name_or_path roberta-base\n            --task_name MRPC\n            --do_train\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --max_seq_length 128\n            --per_gpu_eval_batch_size=1\n            --per_gpu_train_batch_size=8\n            --learning_rate 2e-4\n            --num_train_epochs 3\n            --overwrite_output_dir\n            --seed 42\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --save_steps 0\n            --overwrite_cache\n            --eval_after_first_stage\n            '.split()
        self.run_and_check(train_args)

        eval_args = '\n            --model_type roberta\n            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --task_name MRPC\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --max_seq_length 128\n            --eval_each_highway\n            --eval_highway\n            --overwrite_cache\n            --per_gpu_eval_batch_size=1\n            '.split()
        self.run_and_check(eval_args)

        entropy_eval_args = '\n            --model_type roberta\n            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --task_name MRPC\n            --do_eval\n            --do_lower_case\n            --data_dir ./tests/fixtures/tests_samples/MRPC/\n            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n            --plot_data_dir ./examples/deebert/results/\n            --max_seq_length 128\n            --early_exit_entropy 0.1\n            --eval_highway\n            --overwrite_cache\n            --per_gpu_eval_batch_size=1\n            '.split()
        self.run_and_check(entropy_eval_args)
| 365
|
from __future__ import annotations
# Undirected graph as an adjacency list: vertex -> list of neighbour vertices.
__snake_case : Any = {
    """A""": ["""B""", """C""", """E"""],
    """B""": ["""A""", """D""", """E"""],
    """C""": ["""A""", """F""", """G"""],
    """D""": ["""B"""],
    """E""": ["""A""", """B""", """D"""],
    """F""": ["""C"""],
    """G""": ["""C"""],
}
class __SCREAMING_SNAKE_CASE:
    """Breadth-first search over an adjacency-list graph, recording parents.

    Fixes: ``__init__`` and the BFS loop bound every value to a throwaway local
    instead of the attributes / variables later read (``self.graph``,
    ``self.parent``, ``visited``, ``queue`` …), and the two public methods
    shared one name.
    """

    def __init__(self, graph, source_vertex):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent = {}
        self.source_vertex = source_vertex

    def breath_first_search(self):
        """BFS from ``source_vertex``, filling ``self.parent`` for reached nodes."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex):
        """Return the ``src->...->target`` path string; raise ValueError if unreachable."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


# Public alias: the demo code below constructs the class via ``Graph``.
Graph = __SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Demo: build the BFS tree from source "G", then query shortest paths.
    # Fix: the instance was bound to ``__snake_case`` while undefined ``g``
    # and ``Graph`` were used.
    g = __SCREAMING_SNAKE_CASE(__snake_case, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 365
| 1
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
# Log to stdout with timestamps; the LOGLEVEL env var controls verbosity.
logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)
logger = logging.getLogger(__name__)

# Registries mapping a supported checkpoint name to its model / tokenizer class.
# (The original clobbered one name, ``__magic_name__``, with all three values
# while the functions below read ``logger``/``model_dict``/``tokenizer_dict``.)
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    """Build and parse the CLI arguments for the BART + beam-search ONNX export.

    Renamed from one of several clashing ``SCREAMING_SNAKE_CASE__`` defs —
    ``main`` below calls it as ``parse_args`` — and locals restored (the parser
    was bound to a throwaway name while ``parser``/``args`` were undefined).
    """
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    """Load the registered model and tokenizer for ``model_name`` onto ``device``.

    Renamed from a clashing ``SCREAMING_SNAKE_CASE__`` def (``main`` calls it as
    ``load_model_tokenizer``). The generation-config overrides below disable
    features the scripted beam search cannot trace.
    """
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)
    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0
    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    """Export model + beam search to ONNX, then check ORT output matches torch.

    Renamed from a clashing ``SCREAMING_SNAKE_CASE__`` def; locals restored —
    the original discarded every intermediate into a throwaway name and then
    referenced undefined ones (``summary_ids``, ``ort_sess`` …).
    """
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1_024, return_tensors="pt").to(model.device)
        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )
        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )
        logger.info("Model exported to {}".format(onnx_file_path))
        # Shrink the graph by de-duplicating initializers before validation.
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    """Entry point: parse args, load the model, export to ONNX and validate.

    Renamed from a clashing ``SCREAMING_SNAKE_CASE__`` def (the ``__main__``
    guard calls ``main``); locals restored.
    """
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
| 276
|
import math
def SCREAMING_SNAKE_CASE__(initial_intensity: float, angle: float) -> float:
    """Malus's law: transmitted intensity = I0 * cos^2(angle in degrees).

    Raises ValueError for a negative intensity or an angle outside 0-360.
    Fixes the original signature, whose two parameters shared one name
    (a SyntaxError) while the body read ``initial_intensity``/``angle``.
    """
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
| 276
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure: symbol names per submodule, extended per backend below.
# Fixes the original, which reassigned one name (``__UpperCAmelCase``) to the
# dict and then to each backend list — clobbering the structure — and finally
# passed the undefined ``_import_structure`` to ``_LazyModule`` without
# installing the proxy into ``sys.modules``.
_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712
|
from math import factorial
def solution(n: int = 20) -> int:
    """Number of lattice paths through an n x n grid: the central binomial C(2n, n).

    Renamed from ``A__`` — the driver below calls ``solution`` — and the body's
    undefined ``n`` references fixed by using the parameter.
    """
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
    import sys

    # With no argument, solve the classic n = 20 case; otherwise use argv[1].
    # Fix: the parsed integer was bound to ``__UpperCAmelCase`` while the
    # undefined ``n`` was printed.
    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
| 597
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowercase =logging.get_logger(__name__)
if is_vision_available():
import PIL
class __magic_name__(lowerCAmelCase):
    r"""
    CLIP-style image processor: optional shortest-edge resize, center crop,
    rescale, normalize and RGB conversion.

    Fixes: ``__init__``/method signatures used one repeated parameter name
    (``snake_case`` — a SyntaxError); ``__init__`` bound its config to throwaway
    locals instead of ``self``; all methods shared the name ``lowerCAmelCase``
    so only the last definition survived.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        """Resize so the image's shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # Calls the module-level ``resize`` transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the configured pipeline; per-call arguments override instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 446
|
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def lowerCamelCase__(fn: Callable):
    """Decorator that emits a UserWarning marking ``fn`` as experimental API.

    Fixes: the wrapped callable was referenced via the undefined ``fn`` while
    the parameter had another name, and the decorated function itself was
    passed as the warning *category* instead of ``UserWarning``.
    """

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
| 446
| 1
|
"""simple docstring"""
from __future__ import annotations
def _A ( _a : float , _a : float , _a : float ):
"""simple docstring"""
if days_between_payments <= 0:
raise ValueError("""days_between_payments must be > 0""" )
if daily_interest_rate < 0:
raise ValueError("""daily_interest_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * daily_interest_rate * days_between_payments
def _A ( _a : float , _a : float , _a : float , ):
"""simple docstring"""
if number_of_compounding_periods <= 0:
raise ValueError("""number_of_compounding_periods must be > 0""" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _A ( _a : float , _a : float , _a : float , ):
"""simple docstring"""
if number_of_years <= 0:
raise ValueError("""number_of_years must be > 0""" )
if nominal_annual_percentage_rate < 0:
raise ValueError("""nominal_annual_percentage_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return compound_interest(
_a , nominal_annual_percentage_rate / 3_6_5 , number_of_years * 3_6_5 )
if __name__ == "__main__":
    # Execute the module's embedded doctests when run as a script.
    import doctest

    doctest.testmod()
| 255
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
    """Integration test for the TensorFlow CamemBERT base model."""

    # NOTE(review): unittest only auto-discovers methods whose names start
    # with ``test_``; this obfuscated name will not be collected — confirm
    # whether it should be renamed (e.g. ``test_output_embeds_base_model``).
    @slow
    def UpperCamelCase__ ( self ) -> Dict:
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        # Token ids for the sentence: J'aime le camembert !
        # (the original rebound a single local ``A`` for every value and then
        # referenced the undefined name ``lowerCamelCase_``; distinct locals
        # restore the intended data flow)
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,  # original used the undefined ``tf.intaa``
        )
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,  # original used the undefined ``tf.floataa``
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 255
| 1
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class a ( unittest.TestCase ):
    """Tests for the ``fill-mask`` pipeline on the PyTorch and TensorFlow backends.

    NOTE(review): the obfuscated original declared every method as
    ``__lowerCamelCase`` (later definitions shadowed earlier ones) and used the
    undefined name ``SCREAMING_SNAKE_CASE_`` for parameters and locals, plus
    duplicate parameter names that do not parse. Method and variable names
    below are reconstructed from the internal ``self.run_test_*`` /
    ``self.run_large_test`` call sites and the upstream test suite — confirm
    against the shared pipeline-test harness.
    """

    # Model mappings consumed by the shared pipeline-test harness.
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()

    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )

    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
        # convert model to fp16
        pipe.model.half()
        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)

    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )

    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        # NOTE(review): the obfuscated original assigned these ``None`` values
        # to throwaway locals; clearing the tokenizer's pad token exercises
        # the no-padding code path — confirm against the harness.
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples

    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        # Default top_k is 5, hence five result dicts per mask.
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)

    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")

    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(vocab):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))

    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)

    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
| 252
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer to fine-tune from.

    NOTE(review): the obfuscated original named the class ``snake_case`` (a
    name also used by the following dataclass, so it was shadowed) while
    ``main`` references ``ModelArguments``; it also declared every field with
    the same un-annotated name and undefined defaults. Field names and
    annotations are reconstructed from the ``model_args.<name>`` accesses in
    ``main``.
    """

    # Path or hub id of the pretrained model (required).
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    # Optional overrides for config / tokenizer lookup.
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    # Freezing switches consumed by main() ("tp" typo kept from the original help text).
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether tp freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to the data used for training and evaluation.

    NOTE(review): the obfuscated original named the class ``snake_case``
    (shadowing the previous dataclass) while ``main`` references
    ``DataTrainingArguments``; it also declared every field with the same
    un-annotated name and undefined defaults. Field names and annotations are
    reconstructed from the ``data_args.<name>`` accesses in ``main``.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Log all metrics for *split* and save them to ``{output_dir}/{split}_results.json``.

    The obfuscated original named this function ``_lowerCamelCase`` (shadowed
    by two later definitions) and declared three parameters all named ``__a``,
    which does not parse; ``main`` calls it as ``handle_metrics(split, metrics,
    output_dir)``, which fixes both the name and the parameter order.

    Args:
        split: Dataset split label, e.g. "train", "val" or "test".
        metrics: Mapping of metric name to value.
        output_dir: Directory the JSON results file is written to.
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    """Fine-tune and/or evaluate a sequence-to-sequence model.

    Parses (model, data, training) arguments from the command line or a JSON
    file, builds the config/tokenizer/model and datasets, then runs the
    train / eval / predict phases selected by the training arguments.

    NOTE(review): the obfuscated original assigned every value to the single
    name ``SCREAMING_SNAKE_CASE_`` while the rest of the body read
    ``model_args``/``data_args``/``training_args``/``config``/``model`` etc.,
    so nothing could run; the assignment targets below are reconstructed from
    those reads. Metric dict keys (``train_n_objs`` etc.) follow the upstream
    script — confirm against downstream consumers.

    Returns:
        dict: all metrics gathered across the train/val/test phases.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        # NOTE(review): attribute name kept from the obfuscated source;
        # upstream uses `fp16` — confirm against SeqaSeqTrainingArguments.
        training_args.fpaa,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    # Forward selected regularization overrides from the training args to the model config.
    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = SeqaSeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=SeqaSeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _lowerCamelCase(__a):
    """Entry point handed to TPU ``xla_spawn``; each spawned process runs main().

    The process-index argument ``__a`` is required by the spawner but unused.
    """
    main()


if __name__ == "__main__":
    main()
| 626
| 0
|
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
a_ = logging.get_logger(__name__)
class lowercase__ ( _UpperCAmelCase ):
    """Argument handler for zero-shot classification.

    Normalizes user-supplied labels and expands each (sequence, label) pair
    into premise/hypothesis pairs for an NLI model.

    NOTE(review): the obfuscated original declared ``__UpperCAmelCase`` three
    times in ``__call__`` (a SyntaxError) and read undefined names inside
    ``UpperCAmelCase``; distinct parameter names are restored here.
    """

    def UpperCAmelCase(self, labels) -> Dict:
        """Accept either a comma-separated string or a list of labels; return a list."""
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template) -> Dict:
        """Build [sequence, formatted-hypothesis] pairs for every sequence/label combo."""
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence." )
        # The template must actually consume the label, otherwise every
        # hypothesis would be identical and the NLI scores meaningless.
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    "The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template ) )
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
@add_end_docstrings(_UpperCAmelCase )
class lowercase__ ( _UpperCAmelCase ):
    # NOTE(review): this class is an identifier-obfuscated NLI zero-shot
    # classification pipeline.  Several method signatures below declare the
    # same parameter name more than once (a SyntaxError in Python), the
    # default ``ZeroShotClassificationArgumentHandler`` is not defined in this
    # file, and ``__init__`` reads an undefined name ``args_parser``.  The
    # comments document the apparent intent; the code cannot run as written.

    def __init__( self , __UpperCAmelCase=ZeroShotClassificationArgumentHandler() , *__UpperCAmelCase , **__UpperCAmelCase )-> List[Any]:
        """Store the argument handler and warn when no 'entailment' label id is found."""
        lowerCAmelCase__ = args_parser  # NOTE(review): `args_parser` is undefined here
        super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )

    @property
    def UpperCAmelCase ( self )-> List[str]:
        """Return the id of the first config label starting with 'entail', else -1."""
        for label, ind in self.model.config.labelaid.items():
            if label.lower().startswith("entail" ):
                return ind
        return -1

    def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=TruncationStrategy.ONLY_FIRST , **__UpperCAmelCase )-> Any:
        """Tokenize premise/hypothesis pairs, retrying without truncation on short inputs."""
        lowerCAmelCase__ = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`" )
            lowerCAmelCase__ = self.tokenizer.eos_token
        try:
            lowerCAmelCase__ = self.tokenizer(
                __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , )
        except Exception as e:
            if "too short" in str(__UpperCAmelCase ):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                lowerCAmelCase__ = self.tokenizer(
                    __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
            else:
                raise e
        return inputs

    def UpperCAmelCase ( self , **__UpperCAmelCase )-> Optional[Any]:
        """Split call kwargs into (preprocess_params, forward_params, postprocess_params)."""
        # Back-compat shim: `multi_class` was renamed to `multi_label`.
        if kwargs.get("multi_class" , __UpperCAmelCase ) is not None:
            lowerCAmelCase__ = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers." )
        lowerCAmelCase__ = {}
        if "candidate_labels" in kwargs:
            lowerCAmelCase__ = self._args_parser._parse_labels(kwargs["candidate_labels"] )
        if "hypothesis_template" in kwargs:
            lowerCAmelCase__ = kwargs["hypothesis_template"]
        lowerCAmelCase__ = {}
        if "multi_label" in kwargs:
            lowerCAmelCase__ = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase , )-> List[str]:
        """Accept at most one extra positional argument (the candidate labels)."""
        if len(__UpperCAmelCase ) == 0:
            pass
        elif len(__UpperCAmelCase ) == 1 and "candidate_labels" not in kwargs:
            lowerCAmelCase__ = args[0]
        else:
            raise ValueError(F"Unable to understand extra arguments {args}" )
        return super().__call__(__UpperCAmelCase , **__UpperCAmelCase )

    def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase="This example is {}." )-> List[Any]:
        """Yield one tokenized model input per (sequence, candidate label) pair."""
        lowerCAmelCase__ , lowerCAmelCase__ = self._args_parser(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        for i, (candidate_label, sequence_pair) in enumerate(zip(__UpperCAmelCase , __UpperCAmelCase ) ):
            lowerCAmelCase__ = self._parse_and_tokenize([sequence_pair] )
            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(__UpperCAmelCase ) - 1,
                **model_input,
            }

    def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
        """Run the model on one pair; pass label/sequence/is_last metadata through."""
        lowerCAmelCase__ = inputs["candidate_label"]
        lowerCAmelCase__ = inputs["sequence"]
        lowerCAmelCase__ = {k: inputs[k] for k in self.tokenizer.model_input_names}
        lowerCAmelCase__ = self.model(**__UpperCAmelCase )
        lowerCAmelCase__ = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False )-> List[str]:
        """Aggregate per-label logits into sorted label/score lists.

        multi_label=True scores each label independently via an
        entailment-vs-contradiction softmax; otherwise entailment logits are
        softmaxed across all candidate labels.
        """
        lowerCAmelCase__ = [outputs["candidate_label"] for outputs in model_outputs]
        lowerCAmelCase__ = [outputs["sequence"] for outputs in model_outputs]
        lowerCAmelCase__ = np.concatenate([output["logits"].numpy() for output in model_outputs] )
        lowerCAmelCase__ = logits.shape[0]
        lowerCAmelCase__ = len(__UpperCAmelCase )
        lowerCAmelCase__ = N // n
        lowerCAmelCase__ = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(__UpperCAmelCase ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            lowerCAmelCase__ = self.entailment_id
            lowerCAmelCase__ = -1 if entailment_id == 0 else 0
            lowerCAmelCase__ = reshaped_outputs[..., [contradiction_id, entailment_id]]
            lowerCAmelCase__ = np.exp(__UpperCAmelCase ) / np.exp(__UpperCAmelCase ).sum(-1 , keepdims=__UpperCAmelCase )
            lowerCAmelCase__ = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            lowerCAmelCase__ = reshaped_outputs[..., self.entailment_id]
            lowerCAmelCase__ = np.exp(__UpperCAmelCase ) / np.exp(__UpperCAmelCase ).sum(-1 , keepdims=__UpperCAmelCase )
        lowerCAmelCase__ = list(reversed(scores[0].argsort() ) )
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
| 115
|
def _a(point_a: list, point_b: list) -> float:
    """Return the Manhattan (L1) distance between two same-dimension points.

    Raises:
        ValueError: if the points have different dimensionality.

    NOTE(review): the obfuscated original declared ``UpperCamelCase_`` twice
    (a SyntaxError) and called ``_validate_point``, a name this file no
    longer defines; per-point type validation is therefore not performed here.
    """
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))
def _a(UpperCamelCase_: list[float]) -> None:
    """Validate that the argument is a non-empty list of numbers.

    Raises:
        ValueError: if the input is empty / falsy.
        TypeError: if the input is not a list, or contains a non-number.

    NOTE(review): the obfuscated original read an undefined name ``point``,
    called ``isinstance(x, x)`` (always a TypeError for a list), and raised
    ``TypeError(point)`` instead of the prepared message; all restored here.
    """
    if not UpperCamelCase_:
        raise ValueError("Missing an input")
    if not isinstance(UpperCamelCase_, list):
        raise TypeError(
            f"Expected a list of numbers as input, found {type(UpperCamelCase_).__name__}"
        )
    for item in UpperCamelCase_:
        if not isinstance(item, (int, float)):
            raise TypeError(
                f"Expected a list of numbers as input, found {type(item).__name__}"
            )
def _a(point_a: list, point_b: list) -> float:
    """Return the Manhattan (L1) distance between two same-dimension points.

    Raises:
        ValueError: if the points have different dimensionality.

    NOTE(review): duplicate ``UpperCamelCase_`` parameters (a SyntaxError) are
    replaced with distinct names; the call to the nonexistent
    ``_validate_point`` helper is dropped (the mangling renamed it away).
    """
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 115
| 1
|
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__UpperCamelCase : Optional[Any] = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
__UpperCamelCase : str = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__UpperCamelCase : Dict = dict(zip(vocab, range(len(vocab))))
__UpperCamelCase : Union[str, Any] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase : Any = Path(tmpdirname)
__UpperCamelCase : Optional[int] = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
__UpperCamelCase : Tuple = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
__UpperCamelCase : List[Any] = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
__UpperCamelCase : Dict = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__UpperCamelCase : List[Any] = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__UpperCamelCase : Optional[int] = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
__UpperCamelCase : Union[str, Any] = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__UpperCamelCase : Dict = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 4
|
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__UpperCamelCase : str = logging.getLogger(__name__)
def _SCREAMING_SNAKE_CASE ():
    """Tokenize a text dump and pickle the resulting id arrays (distillation prep).

    NOTE(review): identifier obfuscation rewrote every assignment target to
    ``lowerCAmelCase`` while subsequent statements still read the original
    names (``parser``, ``args``, ``tokenizer``, ``bos``, ``sep``, ``data``,
    ``rslt``, ``iter``, ``interval``, ``start``, ``end``, ``token_ids``,
    ``dp_file``, ``vocab_size``, ``rslt_``) — the function raises NameError
    as written.  ``_UpperCAmelCase`` stands in for several distinct original
    symbols (``str``, loop variables, etc.).
    """
    lowerCAmelCase = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
    parser.add_argument('--file_path' , type=_UpperCAmelCase , default='data/dump.txt' , help='The path to the data.' )
    parser.add_argument('--tokenizer_type' , type=_UpperCAmelCase , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
    parser.add_argument('--tokenizer_name' , type=_UpperCAmelCase , default='bert-base-uncased' , help='The tokenizer to use.' )
    parser.add_argument('--dump_file' , type=_UpperCAmelCase , default='data/dump' , help='The dump file prefix.' )
    lowerCAmelCase = parser.parse_args()
    logger.info(F'Loading Tokenizer ({args.tokenizer_name})' )
    # Pick the tokenizer plus the correct begin/end special tokens per family.
    if args.tokenizer_type == "bert":
        lowerCAmelCase = BertTokenizer.from_pretrained(args.tokenizer_name )
        lowerCAmelCase = tokenizer.special_tokens_map['cls_token']  # `[CLS]`
        lowerCAmelCase = tokenizer.special_tokens_map['sep_token']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        lowerCAmelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        lowerCAmelCase = tokenizer.special_tokens_map['cls_token']  # `<s>`
        lowerCAmelCase = tokenizer.special_tokens_map['sep_token']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        lowerCAmelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name )
        lowerCAmelCase = tokenizer.special_tokens_map['bos_token']  # `<|endoftext|>`
        lowerCAmelCase = tokenizer.special_tokens_map['eos_token']  # `<|endoftext|>`
    logger.info(F'Loading text from {args.file_path}' )
    with open(args.file_path , 'r' , encoding='utf8' ) as fp:
        lowerCAmelCase = fp.readlines()
    logger.info('Start encoding' )
    logger.info(F'{len(_UpperCAmelCase )} examples to process.' )
    lowerCAmelCase = []
    lowerCAmelCase = 0
    lowerCAmelCase = 1_0000  # progress-log interval
    lowerCAmelCase = time.time()
    for text in data:
        lowerCAmelCase = F'{bos} {text.strip()} {sep}'
        lowerCAmelCase = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
        rslt.append(_UpperCAmelCase )
        iter += 1
        if iter % interval == 0:
            lowerCAmelCase = time.time()
            logger.info(F'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
            lowerCAmelCase = time.time()
    logger.info('Finished binarization' )
    logger.info(F'{len(_UpperCAmelCase )} examples processed.' )
    lowerCAmelCase = F'{args.dump_file}.{args.tokenizer_name}.pickle'
    lowerCAmelCase = tokenizer.vocab_size
    # Store ids as uint16 when the vocab fits, halving the pickle size.
    if vocab_size < (1 << 16):
        lowerCAmelCase = [np.uintaa(_UpperCAmelCase ) for d in rslt]
    else:
        lowerCAmelCase = [np.intaa(_UpperCAmelCase ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F'Dump to {dp_file}' )
    with open(_UpperCAmelCase , 'wb' ) as handle:
        pickle.dump(rslt_ , _UpperCAmelCase , protocol=pickle.HIGHEST_PROTOCOL )


if __name__ == "__main__":
    # NOTE(review): `main` is undefined — the function above was renamed by the
    # obfuscation; the guard cannot locate it.
    main()
| 4
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
lowerCAmelCase_ = logging.get_logger(__name__)
class snake_case_ ( ImageGPTImageProcessor ):
    """Deprecated alias kept for backward compatibility; use ImageGPTImageProcessor.

    NOTE(review): the obfuscated original inherited from the undefined name
    ``A__`` (this module imports ``ImageGPTImageProcessor``), declared
    ``*UpperCamelCase, **UpperCamelCase`` (duplicate parameter names — a
    SyntaxError) and passed the args tuple as the warning category; all
    restored here.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 721
|
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class snake_case_:
    """A binary-tree node: integer payload plus optional left/right children.

    NOTE(review): the obfuscated original declared the same field name
    (``__lowerCAmelCase``) three times, collapsing the class to a single
    defaulted field; the rest of this module reads ``.data``/``.left``/
    ``.right``, restored here.
    """

    data: int
    left: snake_case_ | None = None
    right: snake_case_ | None = None
def lowerCAmelCase( ):
    """Build the fixed 5-node sample tree used by main(): 1 with children 2/3, 2 with 4/5.

    NOTE(review): the obfuscated original assigned all five nodes to the same
    local and returned an undefined ``tree`` without ever linking children;
    it also referenced the nonexistent class name ``Node`` (defined here as
    ``snake_case_``).
    """
    root = snake_case_(1)
    root.left = snake_case_(2)
    root.right = snake_case_(3)
    root.left.left = snake_case_(4)
    root.left.right = snake_case_(5)
    return root
def lowerCAmelCase( a__ : Node | None ):
    """Return the pre-order traversal (root, left, right) as a flat list.

    NOTE(review): the original recursed via ``preorder``, a name the
    obfuscation removed; the recursion now calls this function itself.
    """
    return [a__.data, *lowerCAmelCase(a__.left), *lowerCAmelCase(a__.right)] if a__ else []
def lowerCAmelCase( a__ : Node | None ):
    """Return the post-order traversal (left, right, root) as a flat list.

    NOTE(review): the original recursed via ``postorder``, a name the
    obfuscation removed; the recursion now calls this function itself.
    """
    return lowerCAmelCase(a__.left) + lowerCAmelCase(a__.right) + [a__.data] if a__ else []
def lowerCAmelCase( a__ : Node | None ):
    """Return the in-order traversal (left, root, right) as a flat list.

    NOTE(review): the original recursed via ``inorder``, a name the
    obfuscation removed; the recursion now calls this function itself.
    """
    return [*lowerCAmelCase(a__.left), a__.data, *lowerCAmelCase(a__.right)] if a__ else []
def lowerCAmelCase( a__ : Node | None ):
    """Return the height of the tree (number of levels; empty tree -> 0).

    NOTE(review): the original recursed via ``height``, a name the
    obfuscation removed; the recursion now calls this function itself.
    """
    return (max(lowerCAmelCase(a__.left), lowerCAmelCase(a__.right)) + 1) if a__ else 0
def lowerCAmelCase( a__ : Node | None ):
    """Return a breadth-first (level-order) traversal as a flat list of payloads.

    NOTE(review): the obfuscated original assigned its queue and output to a
    mangled local while reading undefined names ``output``/``process_queue``;
    restored here with a standard deque-based BFS.
    """
    traversal = []
    if a__ is None:
        return traversal
    pending = deque([a__])
    while pending:
        node = pending.popleft()
        traversal.append(node.data)
        if node.left:
            pending.append(node.left)
        if node.right:
            pending.append(node.right)
    return traversal
def lowerCAmelCase( a__ : Node | None , level : int ):
    """Collect payloads of all nodes at `level` (1-based), left to right.

    NOTE(review): the obfuscated original declared ``a__`` twice (a
    SyntaxError) and read an undefined ``output``; the second parameter is
    named ``level`` to match the keyword used by main().
    """
    output = []

    def populate_output(root : Node | None , level : int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )

    populate_output(a__ , level )
    return output
def lowerCAmelCase( a__ : Node | None , level : int ):
    """Collect payloads of all nodes at `level` (1-based), right to left.

    NOTE(review): the obfuscated original declared ``a__`` twice (a
    SyntaxError) and read an undefined ``output``; restored with a distinct
    ``level`` parameter.
    """
    output = []

    def populate_output(root : Node | None , level : int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )

    populate_output(a__ , level )
    return output
def lowerCAmelCase( a__ : Node | None ):
    """Return a zig-zag level-order traversal: one list per level, direction
    alternating starting left-to-right.

    NOTE(review): the obfuscated original called ``height`` and the two
    per-level getters by names that no longer exist in this file; rewritten
    as a self-contained level-by-level sweep with the same output shape.
    """
    if a__ is None:
        return []
    output = []
    current_level = [a__]
    left_to_right = True  # original flag starts at 0, i.e. left-to-right first
    while current_level:
        values = [node.data for node in current_level]
        output.append(values if left_to_right else values[::-1])
        left_to_right = not left_to_right
        next_level = []
        for node in current_level:
            if node.left:
                next_level.append(node.left)
            if node.right:
                next_level.append(node.right)
        current_level = next_level
    return output
def lowerCAmelCase( ):  # Main function for testing.
    """Demo driver: build the sample tree and print every traversal.

    NOTE(review): every helper called below (``make_tree``, ``inorder``,
    ``preorder``, ``postorder``, ``height``, ``level_order``,
    ``get_nodes_from_left_to_right``, ``zigzag``) was renamed to
    ``lowerCAmelCase`` by the obfuscation, so these names are undefined and
    this function raises NameError as written.
    """
    lowerCamelCase__ = make_tree()
    print(f"""In-order Traversal: {inorder(a__ )}""" )
    print(f"""Pre-order Traversal: {preorder(a__ )}""" )
    print(f"""Post-order Traversal: {postorder(a__ )}""" , "\n" )
    print(f"""Height of Tree: {height(a__ )}""" , "\n" )
    print("Complete Level Order Traversal: " )
    print(level_order(a__ ) , "\n" )
    print("Level-wise order Traversal: " )
    for level in range(1 , height(a__ ) + 1 ):
        print(f"""Level {level}:""" , get_nodes_from_left_to_right(a__ , level=a__ ) )
    print("\nZigZag order Traversal: " )
    print(zigzag(a__ ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()  # NOTE(review): `main` is also undefined after the renaming
| 426
| 0
|
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( a_: list[int] ):
    """Find a peak (element greater than both neighbours) by divide and conquer.

    Looks at the middle three elements; if the middle one is not a peak,
    recurses into the rising side.  Runs in O(log n) on suitable inputs.

    NOTE(review): the original recursed via ``peak``, a name the obfuscation
    removed; the recursion now calls this function itself.
    """
    m = len(a_) // 2
    # choose the middle 3 elements
    three = a_[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(a_[:m]) == 2:
            m -= 1
        return __UpperCAmelCase(a_[m:])
    # decreasing
    else:
        if len(a_[:m]) == 2:
            m += 1
        return __UpperCAmelCase(a_[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 494
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = '''gpt_bigcode'''
UpperCamelCase_ : Optional[Any] = ['''past_key_values''']
UpperCamelCase_ : int = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[int] , lowerCAmelCase__ : Union[str, Any]=5_0_2_5_7 , lowerCAmelCase__ : Optional[Any]=1_0_2_4 , lowerCAmelCase__ : int=7_6_8 , lowerCAmelCase__ : Any=1_2 , lowerCAmelCase__ : Any=1_2 , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : List[Any]="gelu_pytorch_tanh" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : int=0.1 , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : Tuple=1e-5 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Union[str, Any]=5_0_2_5_6 , lowerCAmelCase__ : List[str]=5_0_2_5_6 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : Tuple=True , **lowerCAmelCase__ : Any , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Tuple = vocab_size
_UpperCAmelCase : int = n_positions
_UpperCAmelCase : int = n_embd
_UpperCAmelCase : List[Any] = n_layer
_UpperCAmelCase : Any = n_head
_UpperCAmelCase : int = n_inner
_UpperCAmelCase : Optional[Any] = activation_function
_UpperCAmelCase : Optional[int] = resid_pdrop
_UpperCAmelCase : Dict = embd_pdrop
_UpperCAmelCase : Optional[int] = attn_pdrop
_UpperCAmelCase : int = layer_norm_epsilon
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Dict = scale_attn_weights
_UpperCAmelCase : Any = use_cache
_UpperCAmelCase : Dict = attention_softmax_in_fpaa
_UpperCAmelCase : str = scale_attention_softmax_in_fpaa
_UpperCAmelCase : List[str] = multi_query
_UpperCAmelCase : Any = bos_token_id
_UpperCAmelCase : str = eos_token_id
super().__init__(bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
| 494
| 1
|
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __snake_case ( unittest.TestCase ):
    # NOTE(review): identifier obfuscation assigned the computed paths to a
    # local ``SCREAMING_SNAKE_CASE_`` instead of ``self.test_file_path`` /
    # ``self.test_dir``, and the second method's f-string and the undefined
    # ``_A`` reference are likewise broken — the test cannot run as written.

    def lowerCAmelCase__ ( self):
        """Locate accelerate's test script and this test's directory."""
        SCREAMING_SNAKE_CASE_ = inspect.getfile(accelerate.test_utils)
        SCREAMING_SNAKE_CASE_ = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        SCREAMING_SNAKE_CASE_ = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def lowerCAmelCase__ ( self):
        """Launch the test script on 8 TPU cores via xla_spawn."""
        SCREAMING_SNAKE_CASE_ = f"""\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n """.split()
        SCREAMING_SNAKE_CASE_ = [sys.executable] + distributed_args
        execute_subprocess_async(_A , env=os.environ.copy())
| 708
|
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
UpperCamelCase__ : int = Lock()

# NOTE(review): this is an identifier-obfuscated parallel odd-even
# transposition sort.  The first function declares ``_SCREAMING_SNAKE_CASE``
# seven times (duplicate parameter names — a SyntaxError), the module lock is
# bound to ``UpperCamelCase__`` while the code reads ``process_lock``, and
# every pipe/temp variable below is assigned to a mangled name but read by
# its original name — the module cannot run as written.


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any ):
    """Per-process worker: repeatedly swap values with left/right neighbours
    over pipes, keeping the smaller value on the left."""
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(_SCREAMING_SNAKE_CASE )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            SCREAMING_SNAKE_CASE_ = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            SCREAMING_SNAKE_CASE_ = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(_SCREAMING_SNAKE_CASE )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            SCREAMING_SNAKE_CASE_ = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            SCREAMING_SNAKE_CASE_ = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(_SCREAMING_SNAKE_CASE )


def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
    """Spawn one worker per element, wire neighbouring pipes, then collect
    the sorted values from the result pipes."""
    SCREAMING_SNAKE_CASE_ = []
    SCREAMING_SNAKE_CASE_ = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    SCREAMING_SNAKE_CASE_ = Pipe()
    SCREAMING_SNAKE_CASE_ = Pipe()
    process_array_.append(
        Process(
            target=_SCREAMING_SNAKE_CASE , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    SCREAMING_SNAKE_CASE_ = temp_rs
    SCREAMING_SNAKE_CASE_ = temp_rr
    for i in range(1 , len(_SCREAMING_SNAKE_CASE ) - 1 ):
        SCREAMING_SNAKE_CASE_ = Pipe()
        SCREAMING_SNAKE_CASE_ = Pipe()
        process_array_.append(
            Process(
                target=_SCREAMING_SNAKE_CASE , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        SCREAMING_SNAKE_CASE_ = temp_rs
        SCREAMING_SNAKE_CASE_ = temp_rr
    process_array_.append(
        Process(
            target=_SCREAMING_SNAKE_CASE , args=(
                len(_SCREAMING_SNAKE_CASE ) - 1,
                arr[len(_SCREAMING_SNAKE_CASE ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(_SCREAMING_SNAKE_CASE ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(_SCREAMING_SNAKE_CASE ) ):
        SCREAMING_SNAKE_CASE_ = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def _UpperCAmelCase ( ):
    """Demo: sort the reversed list 10..1 and print before/after."""
    SCREAMING_SNAKE_CASE_ = list(range(10 , 0 , -1 ) )
    print('Initial List' )
    print(*_SCREAMING_SNAKE_CASE )
    SCREAMING_SNAKE_CASE_ = odd_even_transposition(_SCREAMING_SNAKE_CASE )
    print('Sorted List\n' )
    print(*_SCREAMING_SNAKE_CASE )


if __name__ == "__main__":
    main()  # NOTE(review): `main` is undefined — all functions here share one mangled name
| 620
| 0
|
import re

from filelock import FileLock


try:
    import nltk

    __A = True  # NOTE(review): mangled — the guard below reads NLTK_AVAILABLE
except (ImportError, ModuleNotFoundError):
    __A = False

if NLTK_AVAILABLE:
    # Download the punkt sentence tokenizer once, serialized across processes.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def lowercase__ ( A_: str ) -> str:
    """Put each sentence on its own line (needed for ROUGE-Lsum scoring).

    BUGFIX: ``re.sub`` returns a new string — the original discarded the
    result, so the pegasus ``<n>`` markers were never removed.
    """
    A_ = re.sub("""<n>""" , """""" , A_ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(A_ ) )
| 68
|
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def A ( tf_checkpoint_path , big_bird_config_file , pytorch_dump_path , is_trivia_qa ) -> Any:
    """Convert a TensorFlow BigBird checkpoint into a saved PyTorch model.

    NOTE(review): the obfuscated original declared ``UpperCamelCase_`` four
    times (duplicate parameter names — a SyntaxError); distinct names are
    restored in the order the ``__main__`` block passes them.
    """
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    # TriviaQA checkpoints carry a QA head instead of the pretraining heads.
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model , tf_checkpoint_path , is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
UpperCAmelCase__ : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 48
| 0
|
import math
import sys

import cva
import numpy as np

# NOTE(review): identifier-obfuscated bilateral image filter.  Each helper
# below declares ``_a`` multiple times (duplicate parameter names — a
# SyntaxError), reads undefined mangled names (``_A``, ``args``, ``img``,
# ``kernel_size``...), and all four helpers share the single name
# ``_lowerCamelCase`` so cross-references cannot resolve.  The comments
# describe the apparent intent; the module cannot run as written.


def _lowerCamelCase ( _a , _a ):
    """Vectorized Gaussian: exp(-(img/sigma)^2 / 2) scaled by the 1-D normal constant."""
    _lowerCamelCase = math.sqrt(_A )
    _lowerCamelCase = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )


def _lowerCamelCase ( _a , _a , _a , _a ):
    """Return the kernel_size x kernel_size window of img centred at (x, y)."""
    _lowerCamelCase = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def _lowerCamelCase ( _a , _a ):
    """Build the spatial Gaussian kernel from per-cell distances to the centre."""
    _lowerCamelCase = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , _A ):
        for j in range(0 , _A ):
            _lowerCamelCase = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(_A , _A )


def _lowerCamelCase ( _a , _a , _a , _a , ):
    """Apply the bilateral filter: per-pixel product of the spatial kernel and
    an intensity-difference Gaussian, normalized over the window."""
    _lowerCamelCase = np.zeros(img.shape )
    _lowerCamelCase = get_gauss_kernel(_A , _A )
    _lowerCamelCase , _lowerCamelCase = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            _lowerCamelCase = get_slice(_A , _A , _A , _A )
            _lowerCamelCase = img_s - img_s[kernel_size // 2, kernel_size // 2]
            _lowerCamelCase = vec_gaussian(_A , _A )
            _lowerCamelCase = np.multiply(_A , _A )
            _lowerCamelCase = np.multiply(_A , _A )
            _lowerCamelCase = np.sum(_A ) / np.sum(_A )
            _lowerCamelCase = val
    return imga


def _lowerCamelCase ( _a ):
    """Parse optional CLI args: filename, spatial/intensity variance, kernel size
    (forced odd)."""
    _lowerCamelCase = args[1] if args[1:] else '''../image_data/lena.jpg'''
    _lowerCamelCase = float(args[2] ) if args[2:] else 1.0
    _lowerCamelCase = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        _lowerCamelCase = int(args[4] )
        _lowerCamelCase = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        _lowerCamelCase = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    # Read the image as grayscale, filter in [0, 1] float space, display result.
    _UpperCAmelCase = parse_args(sys.argv)
    _UpperCAmelCase = cva.imread(filename, 0)
    cva.imshow("input image", img)

    _UpperCAmelCase = img / 255
    _UpperCAmelCase = out.astype("float32")
    _UpperCAmelCase = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    _UpperCAmelCase = out * 255
    _UpperCAmelCase = np.uinta(out)
    cva.imshow("output image", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 719
|
def _lowerCamelCase ( _a ):
    """Return the reflected-binary Gray code sequence for ``_a`` bits as ints.

    NOTE(review): the original called ``gray_code_sequence_string`` and read
    an undefined ``sequence`` (both names removed by the obfuscation);
    replaced with the equivalent closed form ``i ^ (i >> 1)``, which produces
    the same reflected ordering the string construction yields.
    """
    if _a < 0:
        raise ValueError('''The given input must be positive''' )
    return [i ^ (i >> 1) for i in range(1 << _a )]
def _lowerCamelCase ( _a ):
    """Return the Gray code sequence for ``_a`` bits as binary strings.

    Built recursively: prefix '0' to the (n-1)-bit sequence, then '1' to that
    sequence reversed.

    NOTE(review): the original recursed via ``gray_code_sequence_string`` and
    read undefined ``smaller_sequence``/``sequence`` locals (all renamed away
    by the obfuscation); the recursion now calls this function itself.
    """
    if _a == 0:
        return ["0"]
    if _a == 1:
        return ["0", "1"]
    seq_len = 1 << _a  # 1 << n is equivalent to 2^n
    # recursive answer generates the sequence for n-1 bits
    smaller_sequence = _lowerCamelCase(_a - 1 )
    sequence = []
    # append 0 to the first half of the smaller sequence
    for i in range(seq_len // 2 ):
        sequence.append('''0''' + smaller_sequence[i] )
    # append 1 to the second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        sequence.append('''1''' + smaller_sequence[i] )
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 0
|
from __future__ import annotations

import os
from typing import Any

import requests

# NOTE(review): identifier obfuscation bound all four module constants below
# to the single name ``UpperCAmelCase`` (each shadowing the previous, leaving
# only the token), so ``BASE_URL`` / ``AUTHENTICATED_USER_ENDPOINT`` /
# ``USER_TOKEN`` are undefined where read.
UpperCAmelCase = '''https://api.github.com'''

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
UpperCAmelCase = BASE_URL + '''/user'''

# https://github.com/settings/tokens
UpperCAmelCase = os.environ.get('''USER_TOKEN''', '''''')


def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
    """Fetch the authenticated user's GitHub profile as a dict.

    NOTE(review): the headers dict is built but never used, and the token is
    passed as both the URL and the ``headers=`` argument — the original
    presumably was ``requests.get(AUTHENTICATED_USER_ENDPOINT,
    headers=headers)``; verify against upstream before relying on this.
    """
    lowercase = {
        'Authorization': F'''token {auth_token}''',
        'Accept': 'application/vnd.github.v3+json',
    }
    return requests.get(__SCREAMING_SNAKE_CASE , headers=__SCREAMING_SNAKE_CASE ).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(F"""{key}: {value}""")
    else:
        raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 84
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    """Builds a tiny MRA configuration plus random dummy inputs and checks
    the output shapes of every MRA head.

    NOTE(review): reconstructed from machine-mangled source in which every
    parameter of every signature had been renamed to the same identifier
    (a SyntaxError) and all locals shared one name. Parameter names were
    restored from the assignment order visible in the original body.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a config plus random input/mask/label tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        # Pipelines need a larger vocabulary than the tiny test default.
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise every supported combination of optional inputs.
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate each example across the choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    """Common-mixin test suite for MRA models.

    NOTE(review): the original was mangled — the mixin base was an undefined
    name and every test method shared the name `lowercase`, so unittest
    discovered none of them; conventional names are restored here.
    """

    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # Re-run the model check with each position-embedding flavour.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    """Slow integration tests that compare real checkpoints against
    previously recorded output slices.

    NOTE(review): method names restored — in the mangled original all three
    tests shared one name, so only the last definition survived.
    """

    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        # The 4096-token checkpoint exercises the long-sequence code path.
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 53
| 0
|
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    """Unit tests for `is_safetensors_compatible`: a repo listing is
    compatible when every `.bin` weight file has a matching `.safetensors`
    counterpart (optionally with a `.fp16` variant infix).

    NOTE(review): in the mangled original all twelve methods shared the name
    `__UpperCamelCase`, so Python kept only the last definition and the other
    eleven tests never ran; distinct names are restored here.
    """

    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 709
|
'''simple docstring'''
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply 2-D max pooling to a square matrix.

    :param arr: a square 2-D input matrix (any array-like accepted by numpy)
    :param size: edge length of the pooling window
    :param stride: step between consecutive windows
    :return: matrix of per-window maxima
    :raises ValueError: if the input is not a square matrix

    >>> maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
    array([[ 6.,  8.],
           [14., 16.]])
    """
    # Fixed: the original definition repeated one mangled name for all three
    # parameters (a SyntaxError) and did not match the `maxpooling` name used
    # by the __main__ caller.
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    i = 0
    mat_i = 0
    while i + size <= arr.shape[0]:
        j = 0
        mat_j = 0
        while j + size <= arr.shape[1]:
            # maximum of the current pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply 2-D average pooling to a square matrix.

    Each output cell is the truncated integer mean of its pooling window.

    :param arr: a square 2-D input matrix (any array-like accepted by numpy)
    :param size: edge length of the pooling window
    :param stride: step between consecutive windows
    :return: matrix of per-window (truncated) averages
    :raises ValueError: if the input is not a square matrix

    >>> avgpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
    array([[ 3.,  5.],
           [11., 13.]])
    """
    # Fixed: the original definition repeated one mangled name for all three
    # parameters (a SyntaxError) and did not match the `avgpooling` name used
    # by the __main__ caller.
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    i = 0
    mat_i = 0
    while i + size <= arr.shape[0]:
        j = 0
        mat_j = 0
        while j + size <= arr.shape[1]:
            # truncated average of the current pooling window
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
    return updated_arr
# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name='avgpooling', verbose=True)
    # Loading the image. Fixed: the original assigned the opened image to a
    # mangled name but used it below as `image` (a NameError).
    image = Image.open('path_to_image')
    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 6
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for StableDiffusionPanoramaPipeline using tiny models.

    NOTE(review): reconstructed from machine-mangled source (duplicate `_a`
    parameters were a SyntaxError and all methods shared the name
    `__lowercase`); method/attribute names restored to the conventional
    diffusers test layout. `UNetaDConditionModel` is the name this file
    imports (presumably a mangled `UNet2DConditionModel` — confirm upstream).
    """

    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny pipeline components with fixed seeds for determinism."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            'prompt': 'a photo of the dolomites',
            'generator': generator,
            # Setting height and width to None to prevent OOMs on CPU.
            'height': None,
            'width': None,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_stable_diffusion_panorama_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = 'french fries'
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear'
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
    """Slow GPU tests for StableDiffusionPanoramaPipeline against the real
    stable-diffusion-2-base checkpoint.

    NOTE(review): reconstructed from machine-mangled source (all methods
    shared the name `__lowercase`); names restored so unittest discovers
    each test.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            'prompt': 'a photo of the dolomites',
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs

    def test_stable_diffusion_panorama_default(self):
        model_ckpt = 'stabilityai/stable-diffusion-2-base'
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-base', safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False
        model_ckpt = 'stabilityai/stable-diffusion-2-base'
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        model_ckpt = 'stabilityai/stable-diffusion-2-base'
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        inputs = self.get_inputs()
        _ = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.2 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 229
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
# Map of canonical GPT-Neo checkpoint names to their hosted config.json files.
# NOTE(review): both module-level constants were renamed to the same identifier,
# so this dict rebinds (and hides) the logger above — restore distinct names.
__lowerCAmelCase = {
    """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class UpperCAmelCase__ ( PretrainedConfig ):
    """GPT-Neo model configuration.

    Defaults reproduce an EleutherAI-style GPT-Neo architecture. The
    ``attention_types`` argument is a list of ``[[type, ...], num_layers]``
    pairs that is expanded into one attention type per layer; its expanded
    length must equal ``num_layers``.

    Fixes over the previous revision: the ``__init__`` signature used one
    duplicated parameter name (a SyntaxError), the three class attributes were
    all bound to the same name, and the base class name was undefined — the
    names below are the ones the body itself references
    (``self.expand_attention_types_params``) and that ``PretrainedConfig``
    expects (``model_type``, ``keys_to_ignore_at_inference``, ``attribute_map``).
    """

    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        # Mutable default kept for backward compatibility; it is only read.
        attention_types=[[['global', 'local'], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function='gelu_new',
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        # One expanded attention type per layer is required downstream.
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f"""but is `len(config.attention_layers) = {len(self.attention_layers)}`, """
                f"""`config.num_layers = {self.num_layers}`. """
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.'
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Expand ``[[types, count], ...]`` into a flat per-layer list of types."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def UpperCAmelCase_ (input, dimension, size, step):
    """ONNX-exportable replacement for ``torch.Tensor.unfold``.

    Returns sliding windows of width ``size`` taken every ``step`` elements
    along ``dimension``, with the window axis appended last — the same
    contract as ``input.unfold(dimension, size, step)``.

    The previous revision declared every parameter with the same name (a
    SyntaxError); the names are restored here. ``input`` keeps its historical
    (builtin-shadowing) name so positional/keyword callers are unaffected.
    """
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    # First index of each window, and how many windows fit.
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    # (num_windows, size) gather indices along `dimension`.
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    # Tuple indexing (list indexing is deprecated by recent PyTorch).
    sliced = input[tuple(s)]

    # Move the window axis to the end, matching Tensor.unfold's layout.
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def UpperCAmelCase_ (seq_length, window_size):
    """Return (block_length, num_blocks) for local attention.

    ``block_length`` is the largest divisor of ``seq_length`` that is strictly
    smaller than ``window_size``; ``num_blocks`` is ``seq_length // block_length``.

    NOTE(review): this module binds two different functions to this obfuscated
    name, so the later definition shadows the earlier one — they should carry
    distinct names. The duplicated parameter names (a SyntaxError) are
    restored here.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')
class UpperCAmelCase__ ( OnnxConfigWithPast ):
    """ONNX export configuration for GPT-Neo, with past-key-values support.

    Fixes over the previous revision: every method declared duplicated
    parameter names (a SyntaxError) while the bodies referenced the original
    names; the base class identifier was undefined (``OnnxConfigWithPast`` is
    what the ``super(...)`` call inside ``generate_dummy_inputs`` targets).
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported model's inputs."""
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            # With a cache, the mask covers past + current tokens.
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        # GPT-Neo names this hyper-parameter `num_heads`.
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs for tracing, including zeroed past key/values."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            # Extend the mask so it also covers the dummy past positions.
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 229
| 1
|
from collections import deque
from .hash_table import HashTable
class UpperCamelCase_ ( HashTable ):
    """Hash table with separate chaining: each bucket holds a deque of values.

    Fixes over the previous revision: the base class identifier was undefined
    (``HashTable`` is imported above for exactly this purpose), the freshly
    created deque in ``_set_value`` was assigned to a throwaway local instead
    of back into ``self.values[key]``, and the method names are restored to
    the hooks the ``HashTable`` base class dispatches to.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Lazily create the bucket, then push the newest value at the front.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        # Average remaining capacity per bucket, scaled by the charge factor.
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        # Only defer to the base resolution once this bucket is full and every
        # bucket has been initialised; otherwise keep chaining into `key`.
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 714
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCamelCase__ ( videos ) -> List[List[ImageInput]]:
    """Coerce a single frame, a video, or a batch of videos into the canonical
    batched form: a list of videos, each a list of frames.

    The previous revision renamed the parameter while the body still read
    ``videos``, so every call raised NameError — the name is restored here.

    Raises:
        ValueError: if `videos` is not an image, list of images, or nested list.
    """
    # Already batched: list of videos, each a list whose first entry is a frame.
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    # A single video (flat list of frames) -> wrap once.
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    # A single frame -> wrap twice.
    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f'''Could not make batched video from {videos}''')
class UpperCamelCase_ ( _lowerCamelCase ):
    """Video/image preprocessor: resize, center-crop, rescale (with an optional
    -0.5..0.5 offset), and normalize frames, returning them batched under the
    ``pixel_values`` key.

    NOTE(review): obfuscation damage — every method below declares all of its
    parameters with the single name `lowerCAmelCase_` (a duplicate-argument
    SyntaxError) while the bodies reference the original names (`size`,
    `image`, `do_resize`, ...), and results are bound to the throwaway local
    `_snake_case` instead of attributes/locals. The base class
    `_lowerCamelCase` is also undefined here; `BaseImageProcessor` (imported
    above) appears to be the intended base — confirm and restore names before
    use.
    """

    lowerCAmelCase_ = ['''pixel_values''']

    # Stores the default preprocessing configuration (sizes, flags, mean/std).
    def __init__( self , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = PILImageResampling.BILINEAR , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = True , lowerCAmelCase_ = 1 / 255 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = None , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> None:
        super().__init__(**lowerCAmelCase_ )
        _snake_case = size if size is not None else {'shortest_edge': 256}
        _snake_case = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
        _snake_case = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        _snake_case = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
        _snake_case = do_resize
        _snake_case = size
        _snake_case = do_center_crop
        _snake_case = crop_size
        _snake_case = resample
        _snake_case = do_rescale
        _snake_case = rescale_factor
        _snake_case = offset
        _snake_case = do_normalize
        _snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        _snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD

    # Resize one frame, either to a shortest-edge target or an exact (h, w).
    def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = PILImageResampling.BILINEAR , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> np.ndarray:
        _snake_case = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
        if "shortest_edge" in size:
            _snake_case = get_resize_output_image_size(lowerCAmelCase_ , size['shortest_edge'] , default_to_square=lowerCAmelCase_ )
        elif "height" in size and "width" in size:
            _snake_case = (size['height'], size['width'])
        else:
            raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
        return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )

    # Center-crop one frame to an exact (height, width).
    def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> np.ndarray:
        _snake_case = get_size_dict(lowerCAmelCase_ )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
        return center_crop(lowerCAmelCase_ , size=(size['height'], size['width']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )

    # Rescale pixel values; with `offset` the result is shifted to be centred
    # around zero (image - scale/2 before rescaling).
    def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = True , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> int:
        _snake_case = image.astype(np.floataa )
        if offset:
            _snake_case = image - (scale / 2)
        return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )

    # Channel-wise normalize one frame with the given mean/std.
    def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> np.ndarray:
        return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )

    # Full per-frame pipeline: validate flags, then resize -> crop -> rescale
    # -> normalize -> channel-format conversion.
    def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        if offset and not do_rescale:
            raise ValueError('For offset, do_rescale must also be set to True.' )
        # All transformations expect numpy arrays.
        _snake_case = to_numpy_array(lowerCAmelCase_ )
        if do_resize:
            _snake_case = self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ )
        if do_center_crop:
            _snake_case = self.center_crop(lowerCAmelCase_ , size=lowerCAmelCase_ )
        if do_rescale:
            _snake_case = self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ , offset=lowerCAmelCase_ )
        if do_normalize:
            _snake_case = self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ )
        _snake_case = to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ )
        return image

    # Public entry point: resolve per-call overrides against the stored
    # defaults, batch the input videos, and preprocess every frame.
    def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , **lowerCAmelCase_ , ) -> PIL.Image.Image:
        _snake_case = do_resize if do_resize is not None else self.do_resize
        _snake_case = resample if resample is not None else self.resample
        _snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
        _snake_case = do_rescale if do_rescale is not None else self.do_rescale
        _snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
        _snake_case = offset if offset is not None else self.offset
        _snake_case = do_normalize if do_normalize is not None else self.do_normalize
        _snake_case = image_mean if image_mean is not None else self.image_mean
        _snake_case = image_std if image_std is not None else self.image_std
        _snake_case = size if size is not None else self.size
        _snake_case = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
        _snake_case = crop_size if crop_size is not None else self.crop_size
        _snake_case = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
        if not valid_images(lowerCAmelCase_ ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        _snake_case = make_batched(lowerCAmelCase_ )
        _snake_case = [
            [
                self._preprocess_image(
                    image=lowerCAmelCase_ , do_resize=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , do_center_crop=lowerCAmelCase_ , crop_size=lowerCAmelCase_ , do_rescale=lowerCAmelCase_ , rescale_factor=lowerCAmelCase_ , offset=lowerCAmelCase_ , do_normalize=lowerCAmelCase_ , image_mean=lowerCAmelCase_ , image_std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , )
                for img in video
            ]
            for video in videos
        ]
        _snake_case = {'pixel_values': videos}
        return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
| 541
| 0
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ = logging.get_logger()
@dataclass
class a__ :
    """Records which leaf layers a forward pass executes, in order.

    Registers a forward hook on every submodule of ``module``; calling the
    tracker on an input runs the module and collects every leaf layer
    (no submodules, or a Conv2d/BatchNorm2d) that fired.

    Fixes over the previous revision: the three dataclass fields were all
    declared under one duplicated name and used an undefined default factory;
    the field/property names below are the ones the class body (and the
    sibling transfer class) actually reference.
    """

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs, outputs):
        # Keep only leaves; conv/bn are kept explicitly because they hold
        # parameters even though they can appear wrapped.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # Detach all hooks so repeated calls do not duplicate traces.
        [h.remove() for h in self.handles]
        return self

    @property
    def parametrized(self):
        # Only layers that actually hold parameters/buffers are interesting.
        return list(filter(lambda m: len(list(m.state_dict().keys())) > 0, self.traced))


# Restore the descriptive name the rest of this module refers to.
Tracker = a__
@dataclass
class a__ :
    """Transfers weights layer-by-layer from ``src`` to ``dest``.

    Both models are traced with ``Tracker`` on the same input; the resulting
    lists of parametrized layers (minus any skipped types) are zipped and the
    state dicts copied across pairwise.

    Fixes over the previous revision: the five dataclass fields were all
    declared under one duplicated name and the lambda parameters were mangled;
    the names below are the ones the body references.
    """

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Run ``x`` through both models and copy weights between the traces.

        Raises:
            Exception: if the filtered traces differ in length, i.e. the two
                architectures do not line up one-to-one.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        # Drop layer types the caller asked to ignore on either side.
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f'''Numbers of operations are different. Source module has {len(src_traced)} operations while'''
                f''' destination module has {len(dest_traced)}.'''
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'''Transfered from={src_m} to={dest_m}''')


# Restore the descriptive name the rest of this module refers to.
ModuleTransfer = a__
def __lowercase (_SCREAMING_SNAKE_CASE :str , _SCREAMING_SNAKE_CASE :ResNetConfig , _SCREAMING_SNAKE_CASE :Path , _SCREAMING_SNAKE_CASE :bool = True ):
    """Copy timm ResNet weights into a HF ResNetForImageClassification, verify the
    logits agree on a random input, and optionally push model + processor to the Hub.

    NOTE(review): obfuscation damage — all four parameters share one name (a
    duplicate-argument SyntaxError; originals look like
    (name, config, save_directory, push_to_hub)), and every local is rebound
    to `SCREAMING_SNAKE_CASE` while the body reads `name`, `module_transfer`,
    `from_model`, `our_model`, `save_directory`, `checkpoint_name`,
    `image_processor` — all unbound. Restore the original names before use.
    """
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        # Source (pretrained timm) and destination (fresh HF) models.
        SCREAMING_SNAKE_CASE : List[Any] = timm.create_model(_SCREAMING_SNAKE_CASE , pretrained=_SCREAMING_SNAKE_CASE ).eval()
        SCREAMING_SNAKE_CASE : Tuple = ResNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
        SCREAMING_SNAKE_CASE : Optional[int] = ModuleTransfer(src=_SCREAMING_SNAKE_CASE , dest=_SCREAMING_SNAKE_CASE )
        SCREAMING_SNAKE_CASE : Any = torch.randn((1, 3, 2_24, 2_24) )
        module_transfer(_SCREAMING_SNAKE_CASE )
    # Sanity check: the converted model must reproduce the source logits.
    assert torch.allclose(from_model(_SCREAMING_SNAKE_CASE ) , our_model(_SCREAMING_SNAKE_CASE ).logits ), "The model logits don't match the original one."
    SCREAMING_SNAKE_CASE : List[str] = F'''resnet{"-".join(name.split("resnet" ) )}'''
    print(_SCREAMING_SNAKE_CASE )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add model''' , use_temp_dir=_SCREAMING_SNAKE_CASE , )
        # we can use the convnext one
        SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='''Add image processor''' , use_temp_dir=_SCREAMING_SNAKE_CASE , )
        print(F'''Pushed {checkpoint_name}''' )
def __lowercase (_SCREAMING_SNAKE_CASE :Path , _SCREAMING_SNAKE_CASE :str = None , _SCREAMING_SNAKE_CASE :bool = True ):
    """Build per-checkpoint ResNet configs (ImageNet-1k labels pulled from the Hub)
    and convert one named checkpoint — or all supported ones — via the
    weight-and-push helper above.

    NOTE(review): obfuscation damage — the parameters share one name (a
    duplicate-argument SyntaxError; originals look like
    (save_directory, model_name, push_to_hub)), the two functions in this file
    share the name `__lowercase` (the later shadows the earlier), and locals
    are rebound to `SCREAMING_SNAKE_CASE` while the body reads `num_labels`,
    `idalabel`, `model_name`, `names_to_config`, `config`, `expected_shape` —
    all unbound. Restore the original names before use.
    """
    SCREAMING_SNAKE_CASE : str = '''imagenet-1k-id2label.json'''
    SCREAMING_SNAKE_CASE : Any = 10_00
    SCREAMING_SNAKE_CASE : Dict = (1, num_labels)
    SCREAMING_SNAKE_CASE : List[Any] = '''huggingface/label-files'''
    SCREAMING_SNAKE_CASE : str = num_labels
    # id2label mapping downloaded from the Hub dataset repo.
    SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='''dataset''' ) , '''r''' ) )
    SCREAMING_SNAKE_CASE : Any = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
    SCREAMING_SNAKE_CASE : List[str] = idalabel
    SCREAMING_SNAKE_CASE : str = {v: k for k, v in idalabel.items()}
    # Pre-bind the label metadata into the config constructor.
    SCREAMING_SNAKE_CASE : int = partial(_SCREAMING_SNAKE_CASE , num_labels=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid=_SCREAMING_SNAKE_CASE )
    # Architecture table for every supported resnet* variant.
    SCREAMING_SNAKE_CASE : Any = {
        '''resnet18''': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='''basic''' ),
        '''resnet26''': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='''bottleneck''' ),
        '''resnet34''': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='''basic''' ),
        '''resnet50''': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='''bottleneck''' ),
        '''resnet101''': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='''bottleneck''' ),
        '''resnet152''': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='''bottleneck''' ),
    }
    if model_name:
        convert_weight_and_push(_SCREAMING_SNAKE_CASE , names_to_config[model_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # NOTE(review): every assignment target was mangled to `snake_case_`, so the
    # later references to `parser`, `args`, `pytorch_dump_folder_path` and the
    # module function `convert_weights_and_push` are unbound — restore the
    # original names before running.
    snake_case_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default=None,
        type=str,
        help=(
            """The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
            """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default=None,
        type=Path,
        required=True,
        help="""Path to the output PyTorch model directory.""",
    )
    parser.add_argument(
        """--push_to_hub""",
        default=True,
        type=bool,
        required=False,
        help="""If True, push model and image processor to the hub.""",
    )
    snake_case_ = parser.parse_args()
    snake_case_ = args.pytorch_dump_folder_path
    # Ensure the dump directory exists before conversion starts.
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 507
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class a__ ( unittest.TestCase ):
    """Unit tests for `DisjunctiveConstraint`: construction validation and the
    step-by-step `update` / `reset` progress semantics.

    NOTE(review): obfuscation damage — references to `__UpperCAmelCase` below
    are name-mangled and unbound (they originally carried the token lists /
    `ValueError` / intermediate values); restore the original identifiers
    before running these tests.
    """

    def lowercase__ (self : str ) -> Tuple:
        """Constructor accepts nested python lists; rejects tensor inputs."""
        SCREAMING_SNAKE_CASE : Any = [[1, 2, 4], [1, 2, 3, 4]]
        SCREAMING_SNAKE_CASE : Tuple = DisjunctiveConstraint(__UpperCAmelCase )
        self.assertTrue(isinstance(dc.token_ids, __UpperCAmelCase ) )
        with self.assertRaises(__UpperCAmelCase ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(__UpperCAmelCase ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def lowercase__ (self : Optional[Any] ) -> List[str]:
        """Constructor rejects branches where one sequence is a prefix of another."""
        SCREAMING_SNAKE_CASE : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(__UpperCAmelCase ):
            DisjunctiveConstraint(__UpperCAmelCase ) # fails here

    def lowercase__ (self : List[Any] ) -> Optional[Any]:
        """Stepping 1 -> 2 -> 3 completes the [1, 2, 3] branch."""
        SCREAMING_SNAKE_CASE : List[str] = [[1, 2, 3], [1, 2, 4]]
        SCREAMING_SNAKE_CASE : List[Any] = DisjunctiveConstraint(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = dc.update(1 )
        SCREAMING_SNAKE_CASE : Any = stepped is True and completed is False and reset is False
        self.assertTrue(__UpperCAmelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = dc.update(2 )
        SCREAMING_SNAKE_CASE : Any = stepped is True and completed is False and reset is False
        self.assertTrue(__UpperCAmelCase )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = dc.update(3 )
        SCREAMING_SNAKE_CASE : List[Any] = stepped is True and completed is True and reset is False
        self.assertTrue(__UpperCAmelCase )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def lowercase__ (self : Tuple ) -> Tuple:
        """Longer branch set: completion via [1,2,4,5], then reset and complete [1,2,5]."""
        SCREAMING_SNAKE_CASE : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        SCREAMING_SNAKE_CASE : Union[str, Any] = DisjunctiveConstraint(__UpperCAmelCase )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        # After reset, progress starts over from an empty sequence.
        dc.reset()
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = dc.update(5 )
        self.assertTrue(dc.completed ) # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
| 507
| 1
|
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
SCREAMING_SNAKE_CASE__ = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def UpperCAmelCase__ ( test_results ):
    """Parse a pytest summary line into ``(n_failed, n_passed, time_spent)``.

    The previous revision bound every local to one throwaway name while the
    body read ``expressions`` — every call raised NameError; the original
    locals are restored here.

    NOTE(review): several module-level functions in this file share this
    obfuscated name; the later definitions shadow the earlier ones.
    """
    expressions = test_results.split(' ')

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if 'failed' in expression:
            failed += int(expressions[i - 1])
        if 'passed' in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def UpperCAmelCase__ ( failures_short_lines ):
    """Extract ``{doctest file: first error line}`` from a failures-short report.

    A header line matching ``_ [doctest]`` opens an error section and its third
    whitespace token names the failing file; the first subsequent line that
    does not start with a digit is recorded as that file's error.

    The previous revision bound every local to one throwaway name while the
    body read ``file``/``line``/``in_error`` — the original locals are
    restored here.
    """
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n'):
        if re.search(r'_ \[doctest\]', line):
            in_error = True
            file = line.split(' ')[2]
        elif in_error and not line.split(' ')[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class lowercase :
    """Builds and posts the doc-test report to Slack: one summary message plus a
    threaded reply per failing job.

    NOTE(review): obfuscation damage — every attribute assignment target was
    renamed to `lowerCAmelCase` and most method names to `_snake_case` (which
    collide), while the bodies still reference the original attributes
    (`self.title`, `self._time_spent`, `self.n_success`, `self.n_failures`,
    `self.n_tests`, `self.doc_test_results`, `self.thread_ts`, ...). The
    globals `client` and `doc_test_results` are defined elsewhere in this
    module. Restore the original names before use.
    """

    def __init__( self , lowercase , lowercase ) -> List[Any]:
        # Original parameters look like (title, doc_test_results).
        lowerCAmelCase = title
        lowerCAmelCase = doc_test_results["""time_spent"""].split(""",""" )[0]
        lowerCAmelCase = doc_test_results["""success"""]
        lowerCAmelCase = doc_test_results["""failures"""]
        lowerCAmelCase = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        lowerCAmelCase = doc_test_results

    @property
    def _snake_case ( self ) -> str:
        # Aggregate "hh:mm:ss"-style durations into a single "XhYmZs" string.
        lowerCAmelCase = [self._time_spent]
        lowerCAmelCase = 0
        for time in time_spent:
            lowerCAmelCase = time.split(""":""" )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(lowercase ) == 1:
                lowerCAmelCase = [0, 0, time_parts[0]]
            lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3_600 + minutes * 60 + seconds
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
        return f'{int(lowercase )}h{int(lowercase )}m{int(lowercase )}s'

    @property
    def _snake_case ( self ) -> Dict:
        # Slack header block carrying the report title.
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def _snake_case ( self ) -> Dict:
        # Summary block used when every test passed.
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
            },
        }

    @property
    def _snake_case ( self ) -> Dict:
        # Summary block used when some tests failed.
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
                    f' {self.time}.'
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
            },
        }

    @property
    def _snake_case ( self ) -> Dict:
        # Markdown block listing the failing examples, grouped by category.
        lowerCAmelCase = 40
        lowerCAmelCase = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(lowercase , lowercase )}
        lowerCAmelCase = """"""
        for category, failures in category_failures.items():
            if len(lowercase ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(lowercase )
            report += "`"
        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f'The following examples had failures:\n\n\n{report}\n',
            },
        }

    @property
    def _snake_case ( self ) -> str:
        # Assemble the full Slack payload from the blocks above.
        lowerCAmelCase = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(lowercase )

    @staticmethod
    def _snake_case ( ) -> str:
        # Post a generic "something went wrong" message (used when the run
        # itself broke before results could be gathered).
        lowerCAmelCase = [
            {
                """type""": """section""",
                """text""": {
                    """type""": """plain_text""",
                    """text""": """There was an issue running the tests.""",
                },
                """accessory""": {
                    """type""": """button""",
                    """text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
                    """url""": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
                },
            }
        ]
        print("""Sending the following payload""" )
        print(json.dumps({"""blocks""": json.loads(lowercase )} ) )
        client.chat_postMessage(
            channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=lowercase , )

    def _snake_case ( self ) -> Optional[int]:
        # Post the summary message; its timestamp anchors the reply thread.
        print("""Sending the following payload""" )
        print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
        lowerCAmelCase = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else """All tests passed."""
        lowerCAmelCase = client.chat_postMessage(
            channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=lowercase , )

    def _snake_case ( self , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
        # Build the Slack blocks for one job's failure details (values are
        # truncated to keep the message within Slack limits).
        lowerCAmelCase = """"""
        for key, value in failures.items():
            lowerCAmelCase = value[:200] + """ [Truncated]""" if len(lowercase ) > 250 else value
            failures_text += f'*{key}*\n_{value}_\n\n'
        lowerCAmelCase = job_name
        lowerCAmelCase = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
        if job_link is not None:
            lowerCAmelCase = {
                """type""": """button""",
                """text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
                """url""": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def _snake_case ( self ) -> Any:
        # Post one threaded reply per failing job under the summary message.
        if self.thread_ts is None:
            raise ValueError("""Can only post reply if a post has been made.""" )
        lowerCAmelCase = self.doc_test_results.pop("""job_link""" )
        self.doc_test_results.pop("""failures""" )
        self.doc_test_results.pop("""success""" )
        self.doc_test_results.pop("""time_spent""" )
        lowerCAmelCase = sorted(self.doc_test_results.items() , key=lambda lowercase : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result["""failures"""] ):
                lowerCAmelCase = f'*Num failures* :{len(job_result["failed"] )} \n'
                lowerCAmelCase = job_result["""failures"""]
                lowerCAmelCase = self.get_reply_blocks(lowercase , lowercase , lowercase , text=lowercase )
                print("""Sending the following reply""" )
                print(json.dumps({"""blocks""": blocks} ) )
                client.chat_postMessage(
                    channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=f'Results for {job}' , blocks=lowercase , thread_ts=self.thread_ts["""ts"""] , )
                # Throttle to stay under Slack's rate limits.
                time.sleep(1 )
def UpperCAmelCase__():
    """Return {job name: html url} for all jobs of the current GitHub Actions run.

    Reads ``GITHUB_RUN_ID`` from the environment and pages through the GitHub
    REST API (100 jobs per page). Returns ``{}`` on any error.

    BUG FIX: the obfuscated original read the undefined name
    ``SCREAMING_SNAKE_CASE`` for the URL and clobbered every local into one
    identifier; proper locals are restored.
    """
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # The first request already returned up to 100 jobs; fetch the rest.
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
        return {}
def UpperCAmelCase__(SCREAMING_SNAKE_CASE: str):
    """Read every text file inside the artifact directory *SCREAMING_SNAKE_CASE*.

    Returns a dict mapping file stem (name without extension) -> file content.
    Returns ``{}`` when the directory does not exist.

    BUG FIX: the obfuscated original read each file into a discarded local, so
    the returned ``_artifact`` dict was always empty.

    Raises:
        ValueError: if a file cannot be decoded as UTF-8.
    """
    _artifact = {}

    if os.path.exists(SCREAMING_SNAKE_CASE):
        files = os.listdir(SCREAMING_SNAKE_CASE)
        for file in files:
            try:
                with open(os.path.join(SCREAMING_SNAKE_CASE, file), encoding="utf-8") as f:
                    # Key by the file name without its extension.
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(SCREAMING_SNAKE_CASE, file)}.") from e

    return _artifact
def UpperCAmelCase__():
    """Scan the current working directory for artifact folders.

    Every sub-directory of the CWD is treated as one artifact. Returns a dict
    mapping artifact name -> Artifact, where each Artifact records the paths
    that belong to it (callers read ``.paths``).

    BUG FIX: the obfuscated original never set the nested class's attributes
    and referenced undefined names (``Artifact``, ``name``, ``path``); the
    local class is reconstructed from its usage.
    """

    class Artifact:
        def __init__(self, name):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    # NOTE(review): this block was machine-obfuscated. Names it calls
    # (get_job_links, retrieve_available_artifacts, retrieve_artifact,
    # handle_test_results, extract_first_line_failure, Message) are not
    # defined under those names in this file, and every assignment below goes
    # to the same identifier SCREAMING_SNAKE_CASE__, clobbering the previous
    # value, while later lines read the original (now undefined) variable
    # names (docs, github_actions_job_links, available_artifacts, artifact,
    # failed, success, time_spent, all_failures, line, file_path, test,
    # category, failure, message). Code kept unchanged; comments only flag
    # the damage.
    SCREAMING_SNAKE_CASE__ = get_job_links()
    SCREAMING_SNAKE_CASE__ = retrieve_available_artifacts()
    # Mapping from file glob to the human-readable doc-test category name.
    SCREAMING_SNAKE_CASE__ = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    SCREAMING_SNAKE_CASE__ = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    SCREAMING_SNAKE_CASE__ = github_actions_job_links.get("run_doctests")
    SCREAMING_SNAKE_CASE__ = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    SCREAMING_SNAKE_CASE__ = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        # Overall pass/fail stats for the run (failed count, success count,
        # human-readable time) — presumably produced by handle_test_results;
        # TODO confirm, the helper is not visible in this file.
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = handle_test_results(artifact["stats"])
        SCREAMING_SNAKE_CASE__ = failed
        SCREAMING_SNAKE_CASE__ = success
        SCREAMING_SNAKE_CASE__ = time_spent[1:-1] + ", "
        SCREAMING_SNAKE_CASE__ = extract_first_line_failure(artifact["failures_short"])
        # Walk pytest's short summary and collect each FAILED test into the
        # matching doc-test category.
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                SCREAMING_SNAKE_CASE__ = line.replace("FAILED ", "")
                SCREAMING_SNAKE_CASE__ = line.split()[0].replace("\n", "")
                if "::" in line:
                    # pytest-style id: file path before '::', test name after.
                    SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = line.split("::")
                else:
                    SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        SCREAMING_SNAKE_CASE__ = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        SCREAMING_SNAKE_CASE__ = all_failures[test] if test in all_failures else "N/A"
                        SCREAMING_SNAKE_CASE__ = failure
                        break
    # Build and send the Slack report, then thread per-job replies under it.
    SCREAMING_SNAKE_CASE__ = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 700
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class lowercase:
    """A simple dense matrix supporting +, -, * (scalar and matrix),
    transpose, and the Sherman-Morrison rank-1 inverse update.

    BUG FIXES vs the obfuscated original: ``__init__`` declared three
    parameters with the same name (a SyntaxError), and ``validate_indicies``,
    ``transpose`` and ``sherman_morrison`` had all been renamed to one
    identifier so later defs shadowed earlier ones while the call sites
    (``__getitem__``, ``sherman_morrison``) still used the original names.
    """

    def __init__(self, row, column, default_value=0) -> None:
        # row x column matrix, every entry initialised to default_value.
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Width of the widest element, for aligned printing.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc) -> bool:
        # True iff loc is a (row, col) pair inside the matrix bounds.
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, lowercase)
        assert self.row == another.row and self.column == another.column
        result = lowercase(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = lowercase(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # scalar multiplication
            result = lowercase(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, lowercase):  # matrix multiplication
            assert self.column == another.row
            result = lowercase(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        result = lowercase(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Return (A + u v^T)^(-1), given that ``self`` is A^(-1).

        u and v must be column vectors matching the matrix dimension.
        Returns None when the updated matrix is not invertible
        (1 + v^T A^(-1) u == 0).
        """
        assert isinstance(u, lowercase) and isinstance(v, lowercase)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1  # u, v should be column vectors

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    # NOTE(review): the obfuscated original referenced undefined names
    # (ainv, u, v, SCREAMING_SNAKE_CASE, testa); the demo below is
    # reconstructed from the surviving print statements.
    def test1() -> None:
        """Demonstrate the Sherman-Morrison update starting from a^(-1) = I."""
        ainv = lowercase(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = lowercase(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = lowercase(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
| 393
| 0
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def SCREAMING_SNAKE_CASE() -> None:
    """Entry point of the ``accelerate`` CLI.

    Builds the argument parser, registers every sub-command, then dispatches
    to the chosen command's ``func``; prints help and exits with status 1
    when no sub-command was given.

    BUG FIX: ``allow_abbrev`` previously read the undefined name
    ``lowercase_``; upstream disables abbreviation, so ``False`` is used.
    """
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


# BUG FIX: the guard below called ``main()``, which was never defined after
# obfuscation renamed the entry point; alias it for backward compatibility.
main = SCREAMING_SNAKE_CASE

if __name__ == "__main__":
    main()
| 213
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __A(lowerCamelCase__):
    """Configuration class for a ViT-MSN model (``model_type="vit_msn"``).

    BUG FIXES vs the obfuscated original: ``__init__`` declared every
    parameter with the same name ``a__`` (a SyntaxError), and each value was
    assigned to a throwaway local instead of an instance attribute. Keyword
    names are restored from the attribute assignments in the body.
    """

    # NOTE(review): upstream names this class attribute ``model_type``; the
    # obfuscated identifier is kept to preserve the visible interface.
    UpperCAmelCase__ = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 114
| 0
|
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
a__ : int = logging.get_logger(__name__)
def UpperCAmelCase__(model, tf_checkpoint_path, config):
    """Load a TensorFlow 2.x BERT checkpoint's weights into a PyTorch BertModel.

    Only the embedding/encoder/pooler layers are handled (no MLM/NSP heads).
    Returns the model with weights assigned in place.

    BUG FIX: the obfuscated original declared three parameters with the same
    name (a SyntaxError) and collapsed every local into one identifier, so
    the attribute walk never advanced. Parameter order (model,
    tf_checkpoint_path, config) matches the conversion entry point below.

    Raises:
        ValueError: on mixed layer depths, extra heads, unknown embedding
            layers, or a shape mismatch between checkpoint and model.
    """
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # NOTE(review): duplicate of the branch above — unreachable,
                # kept to mirror the original control flow.
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            # TF stores dense kernels transposed relative to torch Linear.
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def UpperCAmelCase__(tf_checkpoint_path, config_path, pytorch_dump_path):
    """Convert a TF2.x BERT checkpoint into a PyTorch state-dict file.

    BUG FIX: the obfuscated original declared three parameters with one name
    (a SyntaxError) and never bound ``model``, so the final save read an
    undefined name. Parameter order matches the argparse call in __main__.
    """
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    # NOTE(review): this helper is defined above under an obfuscated name;
    # ``load_tfa_weights_in_bert`` is the original identifier — confirm the
    # def name before running this script end to end.
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)


# Restore the public name used by the __main__ guard below.
convert_tfa_checkpoint_to_pytorch = UpperCAmelCase__
if __name__ == "__main__":
    # BUG FIX: the parser was bound to an obfuscated name while subsequent
    # calls used ``parser``/``args``; consistent local names restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 714
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
# NOTE(review): machine-obfuscated SageMaker integration test. All four
# methods share the name UpperCAmelCase_, so at class-creation time only the
# last def survives and the earlier ones (setup, estimator factory, csv
# export) are unreachable; several identifiers read below (UpperCAmelCase__
# at module level, instance_count, job_name, estimator, result_metrics_df,
# train_runtime, eval_accuracy, eval_loss) are undefined in this file.
# Code kept unchanged; comments only flag the damage.
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER" , "False")) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ])
class UpperCamelCase_ ( unittest.TestCase):
    """Distributed-training smoke tests run on SageMaker, one parameterized
    class instance per framework/script combination declared above."""

    def UpperCAmelCase_ ( self : Dict ) -> Any:
        # Setup: copy the example training script into the SageMaker test path.
        # NOTE(review): check=UpperCAmelCase__ reads an undefined module-level
        # name; presumably check=True upstream — TODO confirm.
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=UpperCAmelCase__ , )
        assert hasattr(self , "env" )

    def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : int ) -> Any:
        # Estimator factory keyed on instance count.
        __SCREAMING_SNAKE_CASE = F"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
        # distributed data settings
        __SCREAMING_SNAKE_CASE = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCAmelCase__ , instance_count=UpperCAmelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCAmelCase__ , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCAmelCase__ , py_version="py36" , )

    def UpperCAmelCase_ ( self : Optional[int] , UpperCAmelCase__ : Optional[int] ) -> Optional[Any]:
        # Export the job's CloudWatch metrics to a csv next to the test path.
        TrainingJobAnalytics(UpperCAmelCase__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )

    @parameterized.expand([(2,)] )
    def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Optional[Any] ) -> List[str]:
        # create estimator
        __SCREAMING_SNAKE_CASE = self.create_estimator(UpperCAmelCase__ )

        # run training
        estimator.fit()

        # result dataframe
        __SCREAMING_SNAKE_CASE = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()

        # extract kpis
        __SCREAMING_SNAKE_CASE = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        __SCREAMING_SNAKE_CASE = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        __SCREAMING_SNAKE_CASE = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 9_9_9_9_9_9 )
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )

        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , UpperCAmelCase__ )
| 553
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__: List[Any] = logging.get_logger(__name__)
def UpperCamelCase__(checkpoint_url):
    """Build a hybrid DPTConfig matching the given original checkpoint URL.

    Returns ``(config, expected_shape)``.

    BUG FIXES vs the obfuscated original: every value was assigned to one
    clobbered local so the config was never populated, the returned names
    were undefined, and ``if "nyu" or "midas" in checkpoint_url`` was always
    true (non-empty string literal is truthy). Config attribute names are
    reconstructed from the upstream conversion script —
    TODO(review): confirm against transformers' convert_dpt_hybrid_to_pytorch.
    """
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # BUG FIX: was `if "nyu" or "midas" in checkpoint_url` (always true).
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        # JSON keys are strings; convert to int ids.
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def UpperCamelCase__(state_dict):
    """Drop the classification-head weights the HF model does not use.

    Removes the ignored keys from *state_dict* in place; keys that are not
    present are silently skipped (pop with a None default).

    BUG FIX: the obfuscated original popped with wrong/undefined names so the
    keys were never removed.
    """
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def UpperCamelCase__(name):
    """Map one original DPT/ViT-hybrid state-dict key to its HF equivalent.

    Applies an ordered chain of substring replacements and returns the
    rewritten key.

    BUG FIX: the obfuscated original stored every replacement into a
    throwaway local while reading the undefined name ``name``, so no renaming
    ever took effect; the standard accumulate-into-``name`` flow is restored.
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def UpperCamelCase__(state_dict, config):
    """Split each fused qkv matrix in *state_dict* into q/k/v entries in place.

    For every encoder layer, pops ``...attn.qkv.{weight,bias}`` and writes
    ``...attention.attention.{query,key,value}.{weight,bias}`` rows:
    query = first hidden_size rows, key = next hidden_size, value = last.

    BUG FIX: the obfuscated original declared two parameters with the same
    name (a SyntaxError) and assigned every slice to a throwaway local, so
    the state dict was never updated.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def UpperCamelCase__():
    """Download and return the standard COCO cats test image (network I/O).

    BUG FIX: the obfuscated original passed undefined names for the URL and
    the ``stream`` flag; ``stream=True`` is required to read ``.raw``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    return Image.open(requests.get(url, stream=True).raw)
# NOTE(review): machine-obfuscated conversion entry point. The def below
# declares five parameters that all share one name — a SyntaxError in
# Python — and the helpers it calls (get_dpt_config, remove_ignore_keys_,
# read_in_q_k_v, rename_key, prepare_img) plus most locals (checkpoint_url,
# state_dict, val, model, image_processor, image, encoding, outputs,
# prediction, pytorch_dump_folder_path, push_to_hub, show_prediction) are
# not defined under those names in this file. Code kept unchanged;
# comments only flag the damage.
@torch.no_grad()
def UpperCamelCase__( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any )->Dict:
    # Build the config matching the checkpoint URL.
    A__ , A__ = get_dpt_config(UpperCamelCase__ )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    A__ = torch.load(UpperCamelCase__ , map_location='''cpu''' )
    # remove certain keys
    remove_ignore_keys_(UpperCamelCase__ )
    # rename keys
    for key in state_dict.copy().keys():
        A__ = state_dict.pop(UpperCamelCase__ )
        A__ = val
    # read in qkv matrices
    read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
    # load HuggingFace model
    A__ = DPTForSemanticSegmentation(UpperCamelCase__ ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(UpperCamelCase__ )
    model.load_state_dict(UpperCamelCase__ )
    model.eval()
    # Check outputs on an image
    A__ = 4_80 if '''ade''' in checkpoint_url else 3_84
    A__ = DPTImageProcessor(size=UpperCamelCase__ )
    A__ = prepare_img()
    A__ = image_processor(UpperCamelCase__ , return_tensors='''pt''' )
    # forward pass
    A__ = model(**UpperCamelCase__ ).logits if '''ade''' in checkpoint_url else model(**UpperCamelCase__ ).predicted_depth
    if show_prediction:
        # Upsample the prediction back to the input image size and display it.
        A__ = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=UpperCamelCase__ , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 2_55 ).show()
    if pytorch_dump_folder_path is not None:
        Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
        print(f"Saving model to {pytorch_dump_folder_path}" )
        model.save_pretrained(UpperCamelCase__ )
        print(f"Saving image processor to {pytorch_dump_folder_path}" )
        image_processor.save_pretrained(UpperCamelCase__ )
    if push_to_hub:
        model.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
        image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
if __name__ == "__main__":
    # Command-line entry point for the DPT checkpoint conversion.
    a__: str = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
        type=str,
        help='URL of the original DPT checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
    )
    parser.add_argument(
        '--model_name',
        default='dpt-large',
        type=str,
        help='Name of the model, in case you\'re pushing to the hub.',
    )
    parser.add_argument(
        '--show_prediction',
        action='store_true',
    )
    # NOTE(review): the parser is bound to the mangled name `a__` above, so
    # `parser` here -- and `convert_dpt_checkpoint` below (the function above
    # is bound to a mangled name) -- are undefined as written.
    a__: Union[str, Any] = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
| 190
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Pipeline tests for the video-classification task.

    NOTE(review): method and parameter names are mangled; several methods
    reuse one parameter name (a SyntaxError as written) and reference names
    such as `example_video_filepath` / `examples` / `video_file_path` that
    are never bound under those names.
    """

    __SCREAMING_SNAKE_CASE = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    # Presumably get_test_pipeline(model, tokenizer, processor) -- builds the
    # pipeline plus example inputs (one local file, one URL). TODO confirm.
    def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
        A__ = hf_hub_download(
            repo_id='''nateraw/video-demo''',filename='''archery.mp4''',repo_type='''dataset''' )
        A__ = VideoClassificationPipeline(model=__lowerCamelCase,image_processor=__lowerCamelCase,top_k=2 )
        A__ = [
            example_video_filepath,
            '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
        ]
        return video_classifier, examples

    # Presumably run_pipeline_test(video_classifier, examples): each example
    # must yield exactly two {score, label} dicts (top_k=2 above).
    def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase ):
        for example in examples:
            A__ = video_classifier(__lowerCamelCase )
            self.assertEqual(
                __lowerCamelCase,[
                    {'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
                    {'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
                ],)

    @require_torch
    def UpperCamelCase ( self ):
        # End-to-end check against a tiny random VideoMAE model: single input
        # and batched inputs must produce the same fixed scores.
        A__ = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
        A__ = VideoMAEFeatureExtractor(
            size={'''shortest_edge''': 10},crop_size={'''height''': 10, '''width''': 10} )
        A__ = pipeline(
            '''video-classification''',model=__lowerCamelCase,feature_extractor=__lowerCamelCase,frame_sampling_rate=4 )
        A__ = hf_hub_download(repo_id='''nateraw/video-demo''',filename='''archery.mp4''',repo_type='''dataset''' )
        A__ = video_classifier(__lowerCamelCase,top_k=2 )
        self.assertEqual(
            nested_simplify(__lowerCamelCase,decimals=4 ),[{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],)
        A__ = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],top_k=2,)
        self.assertEqual(
            nested_simplify(__lowerCamelCase,decimals=4 ),[
                [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
                [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
            ],)

    @require_tf
    def UpperCamelCase ( self ):
        # TF counterpart intentionally not implemented.
        pass
| 190
| 1
|
def __lowerCamelCase ( list_data , key , left = 0 , right = 0 ) -> int:
    """Recursively search *list_data* for *key* from both ends inward.

    Returns the index of *key*, or -1 when absent.  A falsy ``right`` (the
    default 0) is treated as "the last index of the list".

    Fixes from the mangled original: the parameter list reused one name (a
    SyntaxError), the ``right or len(...) - 1`` result was discarded, and the
    recursion called an undefined name ``search``.
    """
    # Default window end: last index of the list.
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        # Shrink the window by one element on each side and recurse.
        return __lowerCamelCase(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 129
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def __lowerCamelCase ( _lowerCAmelCase ) -> Any:
_UpperCAmelCase = filter(lambda _lowerCAmelCase : p.requires_grad , model.parameters() )
_UpperCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__lowerCAmelCase = logging.getLogger(__name__)
def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
if metric == "rouge2":
_UpperCAmelCase = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
_UpperCAmelCase = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
_UpperCAmelCase = "{val_avg_em:.4f}-{step_count}"
elif metric == "loss":
_UpperCAmelCase = "{val_avg_loss:.4f}-{step_count}"
else:
raise NotImplementedError(
F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
" function." )
_UpperCAmelCase = ModelCheckpoint(
dirpath=_lowerCAmelCase , filename=_lowerCAmelCase , monitor=F'''val_{metric}''' , mode="max" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def __lowerCamelCase ( metric , patience ) -> Optional[int]:
    """Build an ``EarlyStopping`` callback monitoring ``val_<metric>``.

    Fixes the mangled duplicate parameter names (a SyntaxError as written).
    "loss"-style metrics improve downward, everything else upward.
    """
    # verbose=True mirrors the usual seq2seq training setup -- TODO confirm
    # against the upstream script (the mangled original obscured this value).
    return EarlyStopping(
        monitor=F'''val_{metric}''' , mode="min" if "loss" in metric else "max" , patience=patience , verbose=True , )
class __SCREAMING_SNAKE_CASE ( pl.Callback):
    """PyTorch-Lightning callback that logs learning rates, metrics, result
    files and parameter counts for seq2seq training runs.

    NOTE(review): local assignments are mangled to `_UpperCAmelCase`, so names
    the bodies later read (`metrics`, `od`, `results_file`, `generations_file`,
    `npars`, `n_trainable_pars`, ...) are never bound as written.
    """

    # Log the per-parameter-group learning rates at batch end.
    def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int ):
        _UpperCAmelCase = {F'''lr_group_{i}''': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(__UpperCamelCase )

    # Presumably _write_logs(trainer, pl_module, type_path, save_generations):
    # dump callback metrics and generations to text files under output_dir.
    @rank_zero_only
    def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : pl.Trainer , __UpperCamelCase : pl.LightningModule , __UpperCamelCase : str , __UpperCamelCase : Optional[int]=True ):
        logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
        _UpperCAmelCase = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
        # Log results
        _UpperCAmelCase = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            _UpperCAmelCase = od / "test_results.txt"
            _UpperCAmelCase = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            _UpperCAmelCase = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
            _UpperCAmelCase = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
        results_file.parent.mkdir(exist_ok=__UpperCamelCase )
        generations_file.parent.mkdir(exist_ok=__UpperCamelCase )
        with open(__UpperCamelCase , "a+" ) as writer:
            for key in sorted(__UpperCamelCase ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                _UpperCAmelCase = metrics[key]
                # Tensors are unwrapped to plain Python scalars before writing.
                if isinstance(__UpperCamelCase , torch.Tensor ):
                    _UpperCAmelCase = val.item()
                _UpperCAmelCase = F'''{key}: {val:.6f}\n'''
                writer.write(__UpperCamelCase )
        if not save_generations:
            return
        if "preds" in metrics:
            _UpperCAmelCase = "\n".join(metrics["preds"] )
            generations_file.open("w+" ).write(__UpperCamelCase )

    # Log total and trainable parameter counts (in millions) at train start.
    @rank_zero_only
    def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : str ):
        try:
            _UpperCAmelCase = pl_module.model.model.num_parameters()
        except AttributeError:
            _UpperCAmelCase = pl_module.model.num_parameters()
        _UpperCAmelCase = count_trainable_parameters(__UpperCamelCase )
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )

    # On test end: persist metrics json and write test logs/generations.
    @rank_zero_only
    def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : pl.Trainer , __UpperCamelCase : pl.LightningModule ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(__UpperCamelCase , __UpperCamelCase , "test" )

    # On validation end: persist metrics json only (generations disabled).
    @rank_zero_only
    def UpperCAmelCase__ ( self : str , __UpperCamelCase : pl.Trainer , __UpperCamelCase : List[str] ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 129
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure for the LayoutLMv2 package: maps submodule name to
# its public symbols. NOTE(review): every assignment below is mangled to
# `_lowerCamelCase`, so the `_import_structure` name consumed by _LazyModule
# at the bottom is never actually defined as written.
_lowerCamelCase : Optional[Any] = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
# Fast tokenizer is only exposed when the `tokenizers` backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Tuple = ["LayoutLMv2TokenizerFast"]
# Vision objects require PIL/vision support.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Union[str, Any] = ["LayoutLMv2FeatureExtractor"]
    _lowerCamelCase : Optional[int] = ["LayoutLMv2ImageProcessor"]
# Modeling classes require torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : List[Any] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]
# Under static type checking, import everything eagerly so type checkers see
# the real symbols; at runtime fall back to the lazy module below.
if TYPE_CHECKING:
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): this normally assigns into sys.modules[__name__]; the
    # mangled left-hand side drops that effect.
    _lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 429
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the DeBERTa package. NOTE(review): assignments
# are mangled to `__lowerCAmelCase`, so `_import_structure` used by
# _LazyModule at the bottom is never defined as written.
__lowerCAmelCase ={
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
# Fast tokenizer requires the `tokenizers` backend.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase =["DebertaTokenizerFast"]
# PyTorch modeling classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase =[
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]
# TensorFlow modeling classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase =[
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]
# Eager imports for type checkers; lazy module at runtime.
if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): normally assigned into sys.modules[__name__].
    __lowerCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 333
| 0
|
'''simple docstring'''
from __future__ import annotations
import bisect
def __UpperCamelCase ( sorted_collection : list[int] , item : int , lo : int = 0 , hi : int = -1 ) -> int:
    """Return the leftmost insertion point for *item* in *sorted_collection*.

    Fixes the mangled original: duplicate parameter names (a SyntaxError)
    and a ``len(...)`` result that was assigned to a throwaway name instead
    of ``hi``.
    """
    # A negative `hi` means "search up to the end of the collection".
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def __UpperCamelCase ( sorted_collection : list[int] , item : int , lo : int = 0 , hi : int = -1 ) -> int:
    """Return the rightmost insertion point for *item* (after any equals).

    Fixes the mangled original: duplicate parameter names and a discarded
    ``len(...)`` assignment meant ``hi`` was never initialised.
    """
    # A negative `hi` means "search up to the end of the collection".
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        # `<=` (vs `<` in the left variant) skips past duplicates of *item*.
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def __UpperCamelCase ( sorted_collection : list[int] , item : int , lo : int = 0 , hi : int = -1 ) -> None:
    """Insert *item* into *sorted_collection* in sorted order, before equals.

    The mangled original called an undefined name ``bisect_left``; the
    leftmost-bisection is inlined here so the function is self-contained.
    """
    if hi < 0:
        hi = len(sorted_collection )
    # Inlined leftmost binary search for the insertion index.
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    sorted_collection.insert(lo , item )
def __UpperCamelCase ( sorted_collection : list[int] , item : int , lo : int = 0 , hi : int = -1 ) -> None:
    """Insert *item* into *sorted_collection* in sorted order, after equals.

    The mangled original called an undefined name ``bisect_right``; the
    rightmost-bisection is inlined here so the function is self-contained.
    """
    if hi < 0:
        hi = len(sorted_collection )
    # Inlined rightmost binary search for the insertion index.
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    sorted_collection.insert(lo , item )
def __UpperCamelCase ( sorted_collection : list[int] , item : int ) -> int | None:
    """Classic iterative binary search.

    Returns an index of *item* in *sorted_collection*, or ``None`` if absent.
    Fixes the mangled original, where every assignment went to one throwaway
    name so ``left``/``right``/``midpoint``/``current_item`` were undefined.
    """
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def __UpperCamelCase ( sorted_collection : list[int] , item : int ) -> int | None:
    """Binary search via the standard-library ``bisect`` module.

    Fixes the mangled original, where the bisect result was assigned to a
    throwaway name while the body read an undefined ``index``.
    """
    index = bisect.bisect_left(sorted_collection , item )
    # bisect_left returns an insertion point; verify the item is really there.
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def __UpperCamelCase ( sorted_collection : list[int] , item : int , left : int , right : int ) -> int | None:
    """Recursive binary search over ``sorted_collection[left:right + 1]``.

    Returns an index of *item* or ``None``.  Fixes the mangled original,
    which discarded the midpoint assignment and recursed through the
    undefined name ``binary_search_by_recursion``.
    """
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        # Item (if present) lies in the lower half.
        return __UpperCamelCase(sorted_collection , item , left , midpoint - 1 )
    else:
        return __UpperCamelCase(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
    # Interactive demo: read a sorted list and a target, report its position.
    # NOTE(review): every assignment is mangled to `lowercase__`, so
    # `user_input`, `collection`, `target`, and `result` -- and the function
    # `binary_search` -- are undefined under those names as written.
    lowercase__ = input("Enter numbers separated by comma:\n").strip()
    lowercase__ = sorted(int(item) for item in user_input.split(","))
    lowercase__ = int(input("Enter a single number to be found in the list:\n"))
    lowercase__ = binary_search(collection, target)
    if result is None:
        print(f'''{target} was not found in {collection}.''')
    else:
        print(f'''{target} was found at position {result} in {collection}.''')
| 276
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the (deprecated) M-CTC-T package.
# NOTE(review): assignments are mangled to `lowercase__`, so the
# `_import_structure` name consumed by _LazyModule below is never defined
# as written.
lowercase__ = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}
# Modeling classes require torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]
# Eager imports for type checkers; lazy module at runtime.
if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys
    # NOTE(review): normally assigned into sys.modules[__name__].
    lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 276
| 1
|
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
A = get_logger(__name__)
A = Path(__file__).parent / """model_card_template.md"""
A = uuida().hex
A = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES
A = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES
A = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def _UpperCamelCase ( UpperCamelCase = None ) -> str:
    """Build the HTTP user-agent string for Hub requests, honoring telemetry
    opt-outs.

    Args:
        UpperCamelCase: optional extra fields to append — a dict of
            ``key/value`` pairs or a pre-formatted string.

    Fixes the mangled original: ``isinstance(_snake_case, _snake_case)``
    (always-true nonsense with undefined names) is restored to dict/str
    checks on the argument, and the base string is bound to ``ua``.
    """
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(UpperCamelCase , dict ):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in UpperCamelCase.items() )
    elif isinstance(UpperCamelCase , str ):
        ua += "; " + UpperCamelCase
    return ua
def _UpperCamelCase ( model_id , token = None , organization = None ) -> str:
    """Return the fully-qualified Hub repo name ``<owner>/<model_id>``.

    Fixes the mangled original: the parameter list reused one name (a
    SyntaxError as written) and the body referenced undefined ``_snake_case``.
    """
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        # No explicit org: attribute the repo to the token's user account.
        username = whoami(token )["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Dict:
    """Render a model card from the Jinja template and save it as README.md.

    NOTE(review): the parameter list is a mangled duplicate of one name (a
    SyntaxError as written); from usage the arguments are presumably
    (args, model_name) -- TODO confirm.  The body also reads undefined
    names `_snake_case`, `args`, `repo_name`, and `model_card`.
    Requires Jinja2 to be installed.
    """
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`." )
    # Only the main process (local_rank -1 or 0) writes the card.
    if hasattr(_snake_case , "local_rank" ) and args.local_rank not in [-1, 0]:
        return
    __UpperCAmelCase : Optional[Any] = args.hub_token if hasattr(_snake_case , "hub_token" ) else None
    __UpperCAmelCase : int = get_full_repo_name(_snake_case , token=_snake_case )
    __UpperCAmelCase : Any = ModelCard.from_template(
        card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=_snake_case , model_name=_snake_case , repo_name=_snake_case , dataset_name=args.dataset_name if hasattr(_snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(_snake_case , "gradient_accumulation_steps" ) else None
        ) , adam_betaa=args.adam_betaa if hasattr(_snake_case , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(_snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(_snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(_snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(_snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(_snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(_snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(_snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(_snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , )
    __UpperCAmelCase : str = os.path.join(args.output_dir , "README.md" )
    model_card.save(_snake_case )
def _UpperCamelCase ( resolved_file , commit_hash = None ):
    """Extract the commit hash from a resolved cache file path, if possible.

    Returns *commit_hash* unchanged when already known (or when there is no
    file to inspect), otherwise parses the ``snapshots/<hash>/`` segment of
    the path.  Fixes the mangled original: duplicate parameter names (a
    SyntaxError as written) and undefined ``_snake_case`` references.
    """
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(R"snapshots/([^/]+)/" , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    # Only trust strings that actually look like a git commit hash.
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
A = os.path.expanduser(
os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface"""))
)
A = os.path.join(hf_cache_home, """diffusers""")
def _UpperCamelCase ( old_cache_dir = None , new_cache_dir = None ) -> None:
    """Migrate blob files from the pre-0.14 diffusers cache layout to the new
    one.

    Files are moved (not copied) and a symlink is left behind so the old
    layout keeps working where the filesystem allows it.  Fixes the mangled
    original: duplicate parameter names (a SyntaxError as written) and
    undefined ``_snake_case`` references throughout the body.
    """
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*" ):
        # Skip symlinks so an interrupted, re-run migration is idempotent.
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# One-time cache-version bookkeeping: read the cached version marker, run the
# migration for caches older than v1, then write the marker.
# NOTE(review): every assignment below is mangled to `A`, so the names the
# later statements read (`cache_version_file`, `cache_version`,
# `old_cache_is_not_empty`, `trace`) are undefined as written.
A = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
    A = 0
else:
    with open(cache_version_file) as f:
        try:
            A = int(f.read())
        except ValueError:
            # Corrupt/empty marker file: treat as the oldest version.
            A = 0
if cache_version < 1:
    A = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
            """existing cached models. This is a one-time operation, you can interrupt it or run it """
            """later by calling `diffusers.utils.hub_utils.move_cache()`."""
        )
        try:
            move_cache()
        except Exception as e:
            A = """\n""".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
                """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
                """message and we will do our best to help."""
            )
if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, """w""") as f:
            f.write("""1""")
    except Exception:
        logger.warning(
            f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
            """the directory exists and can be written to."""
        )
def _UpperCamelCase ( weights_name , variant = None ) -> str:
    """Insert *variant* before the file extension.

    e.g. ``("model.bin", "fp16") -> "model.fp16.bin"``; the name is returned
    unchanged when *variant* is None.  Fixes the mangled original: duplicate
    parameter names (a SyntaxError as written) and undefined ``_snake_case``
    references.
    """
    if variant is not None:
        splits = weights_name.split("." )
        # Splice the variant in just before the extension.
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits )
    return weights_name
def _UpperCamelCase ( UpperCamelCase , *,
    UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , ) -> List[str]:
    """Resolve a model weights file: a local file path, a file inside a local
    directory, or a download from the Hugging Face Hub.

    NOTE(review): the signature is mangled -- every parameter shares one name
    (a SyntaxError as written) and the body reads undefined names
    (`_snake_case`, `pretrained_model_name_or_path`, `weights_name`,
    `subfolder`, `revision`, `commit_hash`, `model_file`).  Restoring the
    real parameter list needs the upstream script for confirmation.
    """
    __UpperCAmelCase : Dict = str(_snake_case )
    if os.path.isfile(_snake_case ):
        return pretrained_model_name_or_path
    elif os.path.isdir(_snake_case ):
        if os.path.isfile(os.path.join(_snake_case , _snake_case ) ):
            # Load from a PyTorch checkpoint
            __UpperCAmelCase : Optional[int] = os.path.join(_snake_case , _snake_case )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(_snake_case , _snake_case , _snake_case ) ):
            __UpperCAmelCase : Optional[int] = os.path.join(_snake_case , _snake_case , _snake_case )
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(_snake_case ).base_version ) >= version.parse("0.20.0" )
        ):
            try:
                __UpperCAmelCase : Optional[Any] = hf_hub_download(
                    _snake_case , filename=_add_variant(_snake_case , _snake_case ) , cache_dir=_snake_case , force_download=_snake_case , proxies=_snake_case , resume_download=_snake_case , local_files_only=_snake_case , use_auth_token=_snake_case , user_agent=_snake_case , subfolder=_snake_case , revision=revision or commit_hash , )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , _snake_case , )
                return model_file
            except: # noqa: E722
                # Deprecated branch-style download failed: warn and fall
                # through to the normal download path below.
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(_snake_case , _snake_case )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(_snake_case , _snake_case )}' so that the correct variant file can be added." , _snake_case , )
        try:
            # 2. Load model file as usual
            __UpperCAmelCase : str = hf_hub_download(
                _snake_case , filename=_snake_case , cache_dir=_snake_case , force_download=_snake_case , proxies=_snake_case , resume_download=_snake_case , local_files_only=_snake_case , use_auth_token=_snake_case , user_agent=_snake_case , subfolder=_snake_case , revision=revision or commit_hash , )
            return model_file
        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`." )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}" )
| 77
|
"""simple docstring"""
_A = 256
# Modulus to hash a string
_A = 1_000_003
def lowercase(pattern: str, text: str, alphabet_size: int = 256, modulus: int = 1_000_003) -> bool:
    """Return True if ``pattern`` occurs in ``text`` (Rabin-Karp search).

    A rolling polynomial hash over character ordinals is compared against the
    pattern hash; candidate windows are confirmed with a direct string
    comparison, so hash collisions cannot produce false positives.

    Args:
        pattern: substring to search for (may be empty -> always found).
        text: string being searched.
        alphabet_size: radix of the rolling hash.
        modulus: modulus keeping hash values small.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and first window of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        # Ends up as alphabet_size**(p_len - 1) % modulus: weight of the
        # leading character, needed to roll the window forward.
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash:
        # drop text[i], shift left one radix place, append text[i + p_len].
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


# The test code below refers to the matcher by its real name.
rabin_karp = lowercase
# The matcher defined above is only bound to the obfuscated name
# ``lowercase``; capture it as ``rabin_karp`` BEFORE the test function
# below rebinds ``lowercase`` (otherwise calling ``lowercase`` inside the
# test would recurse into the test itself).
rabin_karp = lowercase


def lowercase() -> None:
    """Run the Rabin-Karp matcher against known positive/negative cases."""
    # Test 1)
    pattern = "abc1abc12"
    text_with_match = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_without_match = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_with_match) and not rabin_karp(pattern, text_without_match)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)  (non-ASCII: 'ü' must not match 'ue')
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


# Name expected by the ``__main__`` guard.
test_rabin_karp = lowercase

if __name__ == "__main__":
    test_rabin_karp()
| 505
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ : List[str] = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str = [
"OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OPTForCausalLM",
"OPTModel",
"OPTPreTrainedModel",
"OPTForSequenceClassification",
"OPTForQuestionAnswering",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : List[Any] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Tuple = [
"FlaxOPTForCausalLM",
"FlaxOPTModel",
"FlaxOPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
lowerCAmelCase__ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 329
|
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowerCAmelCase__ : Optional[Any] = [{"type": "code", "content": INSTALL_CONTENT}]
lowerCAmelCase__ : Any = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 329
| 1
|
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the set of already-emitted deprecation warnings before the test."""
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    """Replace the huggingface_hub client used by datasets.inspect with a stub."""

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Each metric entry point must emit the 'moved to evaluate' FutureWarning."""
    if "tmp_path" in args:
        # Substitute the real tmp_path fixture value for the placeholder string.
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 480
|
'''simple docstring'''
def a_(x_points: list, y_points: list, xa: float) -> list:
    """Interpolate and evaluate a polynomial at ``xa`` with Neville's method.

    Args:
        x_points: x coordinates of the known points (at least two).
        y_points: y coordinates of the known points, same length.
        xa: point at which to evaluate the interpolating polynomial.

    Returns:
        ``[value, table]`` where ``value`` is the interpolated value and
        ``table`` is the full Neville tableau used to compute it.

    >>> a_((1, 2, 3, 4, 6), (6, 7, 8, 9, 11), 5)[0]
    10.0
    """
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    # Column 1 of the tableau holds the known y values.
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 350
| 0
|
from __future__ import annotations
import math
from collections.abc import Callable
def UpperCAmelCase_(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of ``fnc`` between ``x_start`` and ``x_end``.

    The curve is approximated by ``steps`` straight chords whose lengths are
    summed; more steps give a better approximation.
    """
    xa = x_start
    fxa = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        length += math.hypot(xa_next - xa, fxa_next - fxa)

        # Increment step
        xa = xa_next
        fxa = fxa_next

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"""With {i} steps: {UpperCAmelCase_(f, -10, 10, i)}""")
        i *= 10
| 588
|
from __future__ import annotations
import math
from collections.abc import Callable
def UpperCAmelCase_(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of ``fnc`` between ``x_start`` and ``x_end``.

    The curve is approximated by ``steps`` straight chords whose lengths are
    summed; more steps give a better approximation.
    """
    xa = x_start
    fxa = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        length += math.hypot(xa_next - xa, fxa_next - fxa)

        # Increment step
        xa = xa_next
        fxa = fxa_next

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"""With {i} steps: {UpperCAmelCase_(f, -10, 10, i)}""")
        i *= 10
| 588
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: maps submodule name -> public names it provides.
# The obfuscated version rebound each list to one throwaway variable, so the
# entries were lost and `_LazyModule` received an undefined name.
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 305
|
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : Any =get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_(TokenizerTesterMixin, unittest.TestCase):
    """Test suite for the GPT-SW3 sentencepiece tokenizer (GPTSwaTokenizer)."""

    # Configuration consumed by TokenizerTesterMixin.
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        # (``_lowercase`` is the sample-vocab path defined at module level).
        tokenizer = GPTSwaTokenizer(_lowercase, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(_lowercase)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(_lowercase)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]
        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            '<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
            'Hey there, how are you doing this fine day?',
            'This is a text with a trailing spaces followed by a dot .',
            'Häj sväjs lillebrör! =)',
            'Det är inget fel på Mr. Cool',
        ]
        # fmt: off
        expected_encoding = {'input_ids': [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
| 305
| 1
|
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
# WGS-84-like ellipsoid parameters (meters).  The obfuscated file bound all
# three to the same throwaway name, losing two of them.
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
RADIUS = 6_378_137


def _SCREAMING_SNAKE_CASE(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Haversine distance in meters between two (latitude, longitude) points.

    Latitudes are converted to reduced (parametric) latitudes using the
    ellipsoid flattening before the spherical haversine formula is applied.

    Args:
        lat1, lon1: first point, in degrees.
        lat2, lon2: second point, in degrees.
    """
    # Flattening of the ellipsoid: (equatorial - polar) / equatorial radius.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


# Public, descriptive alias for the obfuscated name above.
haversine_distance = _SCREAMING_SNAKE_CASE

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 712
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    # Slow tokenizer unavailable without sentencepiece.
    NllbTokenizer = None

logger = logging.get_logger(__name__)

# Names referenced by the NllbTokenizerFast class below; the obfuscated file
# rebound them all to one throwaway variable.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
# NLLB-200 language codes registered as additional special tokens.
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
# fmt: on
class a(PreTrainedTokenizerFast):
    """Fast (Rust-backed) NLLB tokenizer based on BPE/SentencePiece.

    The source-language code is added to every encoded sequence: as a prefix
    token in default mode, or after the EOS token in legacy mode.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        """Language code currently used as the source language."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Wrap token ids with the language-dependent prefix/suffix tokens."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """NLLB does not use token type ids; return a zero list of matching length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        # Force the decoder to start with the target-language code.
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to the source-language setting.

        Legacy mode: no prefix, suffix = [eos, src_lang_code].
        Default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset special tokens to the target-language setting (same shape as source)."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 134
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
# Module logger (the obfuscated file immediately clobbered it with the map).
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class lowercase(PretrainedConfig):
    """Configuration for Marian machine-translation models.

    Stores the encoder/decoder architecture hyper-parameters and the special
    token ids; passed to `PretrainedConfig.__init__` for the shared fields.
    """

    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Decoder may share the encoder vocabulary when not given explicitly.
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class lowercase ( _UpperCAmelCase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowercase__ ( self : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__ : Any = {0: '''batch'''}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_lowercase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE__ : Optional[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.num_layers
for i in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE__ : Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE__ : int = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowercase__ ( self : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().outputs
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super(_lowercase , self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_layers
for i in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
SCREAMING_SNAKE_CASE__ : Any = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def lowercase__ ( self : Optional[Any] , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE__ : str = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Generate decoder inputs
SCREAMING_SNAKE_CASE__ : str = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE__ : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Dict = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE__ : Optional[int] = dict(**_lowercase , **_lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = common_inputs['''input_ids'''].shape
SCREAMING_SNAKE_CASE__ : str = common_inputs['''decoder_input_ids'''].shape[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : str = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(_lowercase , _lowercase )] , dim=1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.num_layers
SCREAMING_SNAKE_CASE__ : str = min(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = max(_lowercase , _lowercase ) - min_num_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(_lowercase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE__ : List[Any] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(_lowercase , _lowercase ):
common_inputs["past_key_values"].append((torch.zeros(_lowercase ), torch.zeros(_lowercase )) )
return common_inputs
def lowercase__ ( self : int , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE__ : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE__ : Any = seqlen + 2
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.num_layers
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.num_attention_heads
SCREAMING_SNAKE_CASE__ : str = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE__ : Any = common_inputs['''attention_mask'''].dtype
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
SCREAMING_SNAKE_CASE__ : List[str] = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(_lowercase )
]
return common_inputs
def lowercase__ ( self : Optional[Any] , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : str = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE__ : str = tokenizer.num_special_tokens_to_add(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = compute_effective_axis_dimension(
_lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_lowercase )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE__ : List[Any] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dict(tokenizer(_lowercase , return_tensors=_lowercase ) )
return common_inputs
def lowercase__ ( self : List[str] , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = self._generate_dummy_inputs_for_causal_lm(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
return common_inputs
def lowercase__ ( self : str , _lowercase : Union[str, Any] , _lowercase : Any , _lowercase : Optional[int] , _lowercase : Union[str, Any] ):
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = super()._flatten_past_key_values_(_lowercase , _lowercase , _lowercase , _lowercase )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = super(_lowercase , self )._flatten_past_key_values_(
_lowercase , _lowercase , _lowercase , _lowercase )
@property
def lowercase__ ( self : Dict ):
return 1E-4
| 35
|
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowercase_(ChunkPipeline):
    """Zero-shot object detection pipeline: given an image and a list of free-text
    candidate labels, return scored bounding boxes per label.

    The original block was mechanically renamed beyond runnability (duplicate
    parameter names, all chunk-pipeline hook methods collapsed onto one name);
    the hook names below (``preprocess``/``_forward``/``postprocess``/
    ``_sanitize_parameters``) are required by the ChunkPipeline machinery.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(F"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, 'vision')
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        """Run detection on one image (or a batch spec); ``text_queries`` is a
        legacy alias for ``candidate_labels``."""
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('text_queries')
        if isinstance(image, (str, Image.Image)):
            inputs = {'image': image, 'candidate_labels': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        # Only postprocess takes tunables (threshold / top_k).
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs['threshold']
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs['top_k']
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        """Yield one model-input chunk per candidate label."""
        image = load_image(inputs['image'])
        candidate_labels = inputs['candidate_labels']
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(',')
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        # Pop bookkeeping keys so only tensor inputs reach the model.
        target_size = model_inputs.pop('target_size')
        candidate_label = model_inputs.pop('candidate_label')
        is_last = model_inputs.pop('is_last')
        outputs = self.model(**model_inputs)
        model_outputs = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        """Convert per-label model outputs into a score-sorted list of
        ``{"score", "label", "box"}`` dicts, optionally truncated to top_k."""
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output['target_size'])[0]
            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0])
                result = {'score': score, 'label': label, 'box': box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box(self, box: "torch.Tensor"):
        """Convert a 4-element xyxy tensor into an int bounding-box dict."""
        if self.framework != "pt":
            raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
| 41
| 0
|
'''simple docstring'''
def power(base: float, exponent: int) -> float:
    """Raise ``base`` to the power ``exponent`` using recursion.

    Generalized to negative exponents (the original recursed forever on them);
    the obfuscated def name also broke the recursive self-reference and the
    module-level caller, both of which expect ``power``.

    >>> power(2, 3)
    8
    """
    if exponent < 0:
        return 1 / power(base, -exponent)
    return base * power(base, exponent - 1) if exponent else 1
# Interactive demo: read base/exponent and print base**exponent.
# NOTE(review): obfuscation bound every result to ``lowerCAmelCase_``, so the
# names ``base``, ``exponent`` and ``result`` read below are never defined and
# this script raises NameError as written — TODO restore the real bindings.
if __name__ == "__main__":
    print("""Raise base to the power of exponent using recursion...""")
    lowerCAmelCase_ : List[Any] = int(input("""Enter the base: """).strip())
    lowerCAmelCase_ : Any = int(input("""Enter the exponent: """).strip())
    lowerCAmelCase_ : Union[str, Any] = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        lowerCAmelCase_ : Union[str, Any] = 1 / result
    print(f"""{base} to the power of {exponent} is {result}""")
| 204
|
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Return the Jaccard similarity |A ∩ B| / |A ∪ B| of two collections.

    Accepts two sets, or two lists/tuples (order-preserving union).  With
    ``alternative_union=True`` the denominator is |A| + |B| instead of the
    true union size.  Returns ``None`` for unsupported input types.

    The original def repeated a parameter name (a SyntaxError) and its body
    referenced the intended names restored here; the call site below already
    uses ``jaccard_similarity``.

    >>> jaccard_similarity({"a", "b"}, {"b", "c"})
    0.3333333333333333
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # Order-preserving union of the two sequences.
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
# Demo: Jaccard similarity of two example sets (expected 3/8 = 0.375).
# NOTE(review): both literals are bound to ``lowerCAmelCase_``; ``set_a`` and
# ``set_b`` passed below are never defined — TODO restore the real bindings.
if __name__ == "__main__":
    lowerCAmelCase_ : List[str] = {"""a""", """b""", """c""", """d""", """e"""}
    lowerCAmelCase_ : Dict = {"""c""", """d""", """e""", """f""", """h""", """i"""}
    print(jaccard_similarity(set_a, set_b))
| 204
| 1
|
'''simple docstring'''
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCAmelCase (__A = "laptop"):
    """Scrape amazon.in search results for ``product`` and return a pandas
    DataFrame of title, link, price, rating, MRP and discount per item.

    NOTE(review): this block is mechanically renamed beyond runnability —
    every result is bound to ``_a`` while the body reads the intended names
    (``product``, ``soup``, ``data_frame``, ``product_mrp``, ...), ``item.ha``
    is presumably ``item.h2``, and ``requests.get(__A, headers=__A)`` passes
    the same object as URL and headers.  The upstream import is ``bs4``, not
    ``bsa``.  Needs restoration before it can run; left byte-identical here.
    """
    _a = F'''https://www.amazon.in/laptop/s?k={product}'''
    # Browser-like headers so amazon.in serves the normal HTML page.
    _a = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
        '''Accept-Language''': '''en-US, en;q=0.5''',
    }
    _a = BeautifulSoup(requests.get(__A , headers=__A).text)
    # Initialize a Pandas dataframe with the column titles
    _a = DataFrame(
        columns=[
            '''Product Title''',
            '''Product Link''',
            '''Current Price of the product''',
            '''Product Rating''',
            '''MRP of the product''',
            '''Discount''',
        ])
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            '''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''}) , ):
        try:
            _a = item.ha.text
            _a = '''https://www.amazon.in/''' + item.ha.a['''href''']
            _a = item.find('''span''' , attrs={'''class''': '''a-offscreen'''}).text
            try:
                _a = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''}).text
            except AttributeError:
                # No rating widget on this result card.
                _a = '''Not available'''
            try:
                _a = (
                    '''₹'''
                    + item.find(
                        '''span''' , attrs={'''class''': '''a-price a-text-price'''}).text.split('''₹''')[1]
                )
            except AttributeError:
                # No MRP (strike-through price) shown.
                _a = ''''''
            try:
                # Discount percentage computed from MRP and current price.
                _a = float(
                    (
                        (
                            float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
                            - float(product_price.strip('''₹''').replace(''',''' , ''''''))
                        )
                        / float(product_mrp.strip('''₹''').replace(''',''' , ''''''))
                    )
                    * 100)
            except ValueError:
                _a = float('''nan''')
        except AttributeError:
            pass
        _a = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        _a = ''' '''
        _a = ''' '''
    data_frame.index += 1
    return data_frame
# Demo: scrape headphone listings and dump them to a CSV file.
# NOTE(review): the scraped product is bound to ``lowercase_`` while the call
# below references ``product`` and ``get_amazon_product_data`` — both undefined
# in this renamed file as written.
if __name__ == "__main__":
    lowercase_ = "headphones"
    get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 11
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __A ( unittest.TestCase ):
    '''Builds a small random AlbertConfig plus dummy input tensors for the
    Flax Albert tests (the role of transformers' FlaxAlbertModelTester).

    NOTE(review): this block is mechanically renamed and cannot run as-is —
    ``__init__`` repeats the parameter name ``A`` (a SyntaxError), every local
    is bound to ``_a`` so the ``self.*`` attributes read later are never set,
    both methods are named ``a__`` (the second shadows the first), and
    ``self.prepare_config_and_inputs`` is never defined.  Needs restoration
    from the upstream test file.
    '''

    def __init__(self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=4 , ) -> List[str]:
        """Record the test hyper-parameters (batch size, model dims, ...)."""
        _a = parent
        _a = batch_size
        _a = seq_length
        _a = is_training
        _a = use_attention_mask
        _a = use_token_type_ids
        _a = use_labels
        _a = vocab_size
        _a = hidden_size
        _a = num_hidden_layers
        _a = num_attention_heads
        _a = intermediate_size
        _a = hidden_act
        _a = hidden_dropout_prob
        _a = attention_probs_dropout_prob
        _a = max_position_embeddings
        _a = type_vocab_size
        _a = type_sequence_label_size
        _a = initializer_range
        _a = num_choices
    def a__ (self ) -> str:
        """Create a random config plus input_ids / token_type_ids / attention_mask."""
        _a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _a = None
        if self.use_attention_mask:
            _a = random_attention_mask([self.batch_size, self.seq_length] )
        _a = None
        if self.use_token_type_ids:
            _a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _a = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def a__ (self ) -> List[str]:
        """Repack the prepared config and inputs as (config, inputs_dict)."""
        _a = self.prepare_config_and_inputs()
        _a , _a , _a , _a = config_and_inputs
        _a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class __A ( A , unittest.TestCase ):
    '''Flax Albert model test suite over all Albert head classes.

    NOTE(review): the base class ``A`` is undefined here (upstream this is
    FlaxModelTesterMixin); ``FlaxAlbertForQuestionAnswering`` is listed twice
    in ``all_model_classes``; ``FlaxAlbertModelTester`` is not defined in this
    file (the helper class above was renamed ``__A``); and the slow test binds
    its model to ``_a`` but then calls ``model`` — all obfuscation damage.
    '''

    __lowerCamelCase : Optional[int] = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def a__ (self ) -> Union[str, Any]:
        """Set up the shared model tester for the suite."""
        _a = FlaxAlbertModelTester(self )
    @slow
    def a__ (self ) -> str:
        """Smoke-test from_pretrained + a forward pass for every model class."""
        for model_class_name in self.all_model_classes:
            _a = model_class_name.from_pretrained('''albert-base-v2''' )
            _a = model(np.ones((1, 1) ) )
            self.assertIsNotNone(A )
@require_flax
class __A ( unittest.TestCase ):
    '''Integration test: run albert-base-v2 on a fixed sentence and compare a
    slice of the hidden states against hard-coded reference values.

    NOTE(review): every local is bound to ``_a`` while the assertions read
    ``output`` / ``A``, so this test cannot run as written — another artifact
    of the mechanical renaming.
    '''
    @slow
    def a__ (self ) -> Dict:
        """Check output shape (1, 11, 768) and a 3x3 slice against references."""
        _a = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
        _a = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        _a = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        _a = model(A , attention_mask=A )[0]
        _a = (1, 11, 768)
        self.assertEqual(output.shape , A )
        # Reference activations captured from the upstream checkpoint.
        _a = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , A , atol=1E-4 ) )
| 11
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-module bootstrap for the MLuke tokenizer: the import structure must be
# named ``_import_structure`` because _LazyModule receives it below (the
# obfuscated original bound it to a throwaway name, raising NameError on import).
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7
|
'''simple docstring'''
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place with cocktail-shaker (bidirectional bubble) sort
    and return it.

    The obfuscated original referenced ``unsorted`` while its parameter was
    ``a`` and iterated ``range(a)`` over the list itself (TypeError); the
    call site below already uses ``cocktail_shaker_sort``.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # Backward pass: bubble the smallest remaining element to the front.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        # Forward pass: bubble the largest remaining element toward the back.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True
        # No swap in either direction means the list is already sorted.
        if not swapped:
            break
    return unsorted
# Run doctests, then sort a comma-separated list typed by the user.
# NOTE(review): the input and parsed list are both bound to ``A``; the names
# ``user_input`` and ``unsorted`` read below are never defined as written.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    A: Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
    A: Tuple = [int(item) for item in user_input.split(",")]
    print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 7
| 1
|
'''simple docstring'''
def a(number_of_steps):
    """Count the distinct ways to climb ``number_of_steps`` stairs taking
    1 or 2 steps at a time (Fibonacci recurrence, O(n) time, O(1) space).

    Raises AssertionError for non-positive or non-integer input.  The
    obfuscated original called ``isinstance(x, x)`` and read names it never
    bound; restored here.

    >>> a(4)
    5
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    # Slide the Fibonacci pair forward one step per iteration.
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 667
|
'''simple docstring'''
from __future__ import annotations
def a(number_of_bytes, partitions):
    """Split ``number_of_bytes`` into ``partitions`` contiguous 1-based byte
    ranges, returned as "start-end" strings; the last partition absorbs any
    remainder.

    The obfuscated original repeated a parameter name (a SyntaxError) and
    read names it never bound; restored here.

    >>> a(16, 4)
    ['1-4', '5-8', '9-12', '13-16']
    """
    if partitions <= 0:
        raise ValueError("""partitions must be a positive number!""")
    if partitions > number_of_bytes:
        raise ValueError("""partitions can not > number_of_bytes!""")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        # Last partition extends to the final byte to absorb the remainder.
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}')
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 667
| 1
|
'''simple docstring'''
# Digit parity tables used when filling paired digit positions.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length, remainder, digits, length):
    """Count ``length``-digit reversible numbers (Project Euler 145): numbers n
    with no leading zero in n or reverse(n) whose sum n + reverse(n) has only
    odd digits.  Digits are chosen pairwise from the middle outward; ``digits``
    is the scratch buffer and ``remainder`` the carry accumulated so far.

    Restored from upstream: the obfuscated original repeated its parameter
    name (a SyntaxError) and lost the index targets of the digit assignments.
    """
    if remaining_length == 0:
        # All positions chosen — reject leading zeros, then verify every
        # digit of n + reverse(n) is odd while propagating carries inward.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit is doubled in the sum, so the pending
        # carry must already be odd for the middle sum digit to be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # The paired digit must make digit1 + digit2 odd.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power=9):
    """Sum reversible-number counts over all digit lengths up to ``max_power``."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 47
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
# Generic type parameters for the LRU cache below: T is the key type and U the
# value type.  The obfuscated original bound both TypeVar objects to the same
# throwaway name, leaving ``T`` and ``U`` undefined for every class that
# subscripts Generic[T, U].
T = TypeVar('T')
U = TypeVar('U')
class DoubleLinkedListNode(Generic[T, U]):
    """A doubly linked list node holding one (key, value) pair for the LRU cache.

    Restored name and constructor: the obfuscated original repeated its
    ``__init__`` parameter name (a SyntaxError) and all three cache classes
    shared one class name.
    """

    def __init__(self, key, val) -> None:
        self.key = key
        self.val = val
        # Links are filled in by DoubleLinkedList.add / .remove.
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            F'''Node: key: {self.key}, val: {self.val}, '''
            F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
        )
class DoubleLinkedList(Generic[T, U]):
    """Doubly linked list with sentinel head/rear nodes; the node just before
    ``rear`` is the most recently used entry of the LRU cache."""

    def __init__(self) -> None:
        # Sentinels never carry data; real nodes live between them.
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ['DoubleLinkedList']
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Insert ``node`` just before the rear sentinel (most-recent slot)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Unlink and return ``node``, or None if it is not currently linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    """LRU cache of bounded ``capacity`` backed by a dict plus a doubly linked
    list ordered from least to most recently used.

    Restored names: the obfuscated original gave every method the same name
    (so only the last survived) and never bound the locals it read.
    """

    # Shared map from decorated function -> its cache instance.
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
            F'''capacity={self.capacity}, current size={self.num_keys})'''
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the value for ``key`` (bumping it to most-recent) or None."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Insert or update ``key``; evict the least recently used entry when full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator factory: memoize a one-argument function in an LRUCache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, 'cache_info', cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 47
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module bootstrap for BLIP-2.  Restored: the import structure must be
# named ``_import_structure`` (the original bound it to a throwaway name and
# then referenced ``_import_structure`` — NameError on import), and the
# TYPE_CHECKING imports referenced non-existent ``blip_a`` / ``Blipa*`` names.
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 441
|
import numpy as np
class Cell:
    """A node in the A* search grid: its position, parent link and costs
    (g = path cost so far, h = heuristic, f = g + h).

    Restored: the obfuscated original assigned every attribute to a bare
    local instead of ``self``, and the script below instantiates ``Cell``.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        # Two cells are the same search node iff they share a grid position.
        return self.position == cell.position

    def showcell(self):
        print(self.position)
class Gridworld:
    """A rectangular grid world backed by a numpy array, used by ``astar``."""

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the 8-connected in-bounds neighbours of ``cell``, each a new
        Cell whose parent is ``cell``.  (Method name keeps the upstream typo,
        which ``astar`` calls.)"""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    """A* search from ``start`` to ``goal`` over ``world``; returns the list of
    positions from start to goal by walking parent links backwards.

    Restored faithfully from upstream.  NOTE(review): the two inner ``for c
    in ...: if c == n: continue`` loops only skip within themselves and never
    skip the neighbour — presumably intended closed/open-set checks; kept
    as-is to preserve upstream behavior.
    """
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        # Expand the open node with the smallest f = g + h.
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            # Squared-Euclidean heuristic.
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
# Demo: run A* on the default 5x5 grid from (0, 0) to (4, 4) and mark the
# found path in the world array.
# NOTE(review): every object is bound to ``_snake_case``, so ``world``,
# ``start``, ``goal`` and ``s`` read below are never defined, and the
# intended ``start.position = (0, 0)`` / ``goal.position = (4, 4)``
# assignments were lost — the demo cannot run as written.
if __name__ == "__main__":
    _snake_case : Optional[int] = Gridworld()
    # Start position and goal
    _snake_case : Dict = Cell()
    _snake_case : List[str] = (0, 0)
    _snake_case : Union[str, Any] = Cell()
    _snake_case : List[Any] = (4, 4)
    print(F"path from {start.position} to {goal.position}")
    _snake_case : str = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        _snake_case : List[Any] = 1
    print(world.w)
| 441
| 1
|
'''simple docstring'''
import numpy as np
import qiskit
def bbaa(key_len=8, seed=None):
    """Simulate the BB84 quantum key distribution protocol with qiskit and
    return a shared key of exactly ``key_len`` bits (padded with "0" if the
    basis-matching sieve yields too few bits).

    Restored: the obfuscated original repeated its parameter name (a
    SyntaxError) and lost every local binding; the demo below calls ``bbaa``.
    """
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="""BB84""")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("""aer_simulator""")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = """""".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ])
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, """0""")
    return key
# Demo: print a deterministic BB84 key (fixed seed), then run doctests.
if __name__ == "__main__":
    print(F"""The generated key is : {bbaa(8, seed=0)}""")
    from doctest import testmod
    testmod()
| 714
|
"""Lazy import structure for the Time Series Transformer model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

# Register the modeling module only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy dependencies
    # are only imported when one of the exported names is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 160
| 0
|
def bfs(graph, source, sink, parent):
    """Breadth-first search over the residual graph.

    Records the discovered augmenting path in ``parent`` (``parent[v]`` is the
    predecessor of ``v``) and returns True iff ``sink`` is reachable from
    ``source`` through edges with remaining capacity.
    """
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            # Follow only edges that still have positive residual capacity.
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson(graph, source, sink):
    """Compute the maximum flow from ``source`` to ``sink``.

    Ford-Fulkerson with BFS path search (Edmonds-Karp). ``graph`` is a square
    capacity matrix and is mutated in place into the residual graph.
    """
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        # Bottleneck capacity along the augmenting path found by bfs().
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow

        # Update residual capacities along the path (forward minus, backward plus).
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
# Capacity matrix of the example network: graph[u][v] is the capacity of edge u -> v.
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
# Expected maximum flow for this classic example network is 23.
print(ford_fulkerson(graph, source, sink))
| 154
|
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from (row, col) to the bottom-right cell of ``grid``.

    Each step moves one cell up/down/left/right, never revisits a cell on the
    current path, and never enters a blocked cell (value 1). ``visit`` holds
    the cells on the current path and is restored before returning.
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        # Out of bounds, already on the path, or blocked: no path this way.
        return 0
    if row == row_length - 1 and col == col_length - 1:
        # Reached the target cell: exactly one complete path.
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    # Backtrack so sibling branches may pass through this cell.
    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 174
| 0
|
'''simple docstring'''
import argparse

import torch

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt


if __name__ == "__main__":
    # Conversion script: load an original Stable Diffusion checkpoint and save
    # it in the diffusers pipeline layout.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        "--original_config_file",
        default=None,
        type=str,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--scheduler_type",
        default="pndm",
        type=str,
        help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
    )
    parser.add_argument(
        "--pipeline_type",
        default=None,
        type=str,
        help=(
            "The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
            ". If `None` pipeline will be automatically inferred."
        ),
    )
    parser.add_argument(
        "--image_size",
        default=None,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--prediction_type",
        default=None,
        type=str,
        help=(
            "The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
            " Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--stable_unclip",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
    )
    parser.add_argument(
        "--stable_unclip_prior",
        type=str,
        default=None,
        required=False,
        help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
    )
    parser.add_argument(
        "--clip_stats_path",
        type=str,
        help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
        required=False,
    )
    parser.add_argument(
        "--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
    )
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--vae_path",
        type=str,
        default=None,
        required=False,
        help="Set to a path, hub id to an already converted vae to not convert it again.",
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        # "--half" means half precision, i.e. fp16 weights.
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 399
|
'''simple docstring'''
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    """Builds small configs/inputs and runs shape checks for BertGeneration models."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random input ids, optional mask/labels, and a config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Same as prepare_config_and_inputs plus cross-attention inputs."""
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        # Call both with and without an attention mask; only the shape is checked.
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        # Omitting the encoder attention mask must also work.
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        """Check that cached (past_key_values) decoding matches full decoding."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-mixin test suite for BertGeneration encoder/decoder models."""

    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        # Run the same checks with the model_type overridden to "bert".
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        # Exercise the default (None) attention-mask code path.
        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    """Checks encoder outputs against reference values from the released checkpoint."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        # Reference slice of hidden states produced by the original checkpoint.
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    """Checks decoder logits against reference values from the released checkpoint."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        # Reference slice of logits produced by the original checkpoint.
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 399
| 1
|
'''simple docstring'''
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity (moles / volume) to normality by multiplying with the n-factor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V (R = 0.0821 L·atm/(mol·K))."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 24
|
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term with ``n`` decimal digits.

    Project Euler problem 25: with F(1) = F(2) = 1, find the index of the
    first term in the Fibonacci sequence to contain ``n`` digits.
    """
    fa, fb = 1, 1
    index = 2
    # fb is always F(index); grow until it reaches n digits.
    while len(str(fb)) < n:
        fa, fb = fb, fa + fb
        index += 1
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 666
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    """Build the OPT model-input dict, deriving an attention mask from padding when absent."""
    if attention_mask is None:
        # Attend everywhere except positions equal to the pad token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    """Builds small OPT configs/inputs and checks cached vs. full decoding."""

    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        # Force every sequence to end with the EOS token.
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Check that decoding with past_key_values matches full decoding."""
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        # Reduce to a single example to keep the check cheap.
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-mixin test suite for the TF OPT models."""

    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size

                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    """Wrap a nested token-id list into an integer tf constant."""
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    """Fixture helpers shared by the TF OPT head tests."""

    vocab_size = 99

    def _get_config_and_data(self):
        # Sequences of length 7 ending in the EOS token (id 2).
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    """Checks TFOPTModel outputs (eager and XLA) against reference values."""

    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        # Reference slice of hidden states from the released checkpoint.
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        # The XLA-compiled forward pass must agree (looser tolerance).
        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    """Checks TFOPTForCausalLM logits against Metaseq reference values."""

    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        # NOTE(review): "GPTaTokenizer" looks like a mangled GPT2Tokenizer name
        # coming from this file's import line — fix it at the import site.
        tokenizer = GPTaTokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        # The XLA-compiled forward pass must produce the same logits.
        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class __snake_case(unittest.TestCase):
    """Slow integration tests for TF OPT greedy generation against reference outputs."""

    @property
    def prompts(self):
        """Prompts shared by the greedy-generation tests below."""
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        """Greedy generation with opt-125m must reproduce the reference continuations."""
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        """Left-padded batched generation must match per-sentence generation."""
        model_id = "facebook/opt-350m"
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        # shorten max_length by the number of pad tokens in the second row so the
        # unbatched run produces the same number of generated tokens
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64))
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(
            input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        """Greedy generation with opt-350m must reproduce the reference continuations."""
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 718
|
"""simple docstring"""
from __future__ import annotations
def __a(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    """
    Given exactly one of the three carrier concentrations as 0, compute it from
    the mass-action law (n * p = n_i**2) and return a (name, value) tuple.

    Raises ValueError when not exactly one value is 0, or when any value is
    negative.
    """
    # exactly one of the three values must be the unknown (given as 0)
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError('You cannot supply more or less than 2 values')
    elif electron_conc < 0:
        raise ValueError('Electron concentration cannot be negative in a semiconductor')
    elif hole_conc < 0:
        raise ValueError('Hole concentration cannot be negative in a semiconductor')
    elif intrinsic_conc < 0:
        raise ValueError(
            'Intrinsic concentration cannot be negative in a semiconductor')
    elif electron_conc == 0:
        # n = n_i**2 / p
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        # p = n_i**2 / n
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        # n_i = sqrt(n * p)
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        # unreachable: the count(0) guard guarantees one branch above matches
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 217
| 0
|
'''simple docstring'''
def lowercase_(limit: int = 50_000_000) -> int:
    """
    Project Euler 87: count the numbers below *limit* expressible as the sum of
    a prime square, a prime cube and a prime fourth power.

    Fixes vs. the previous version: the parameter is the ``limit`` the body
    reads, the result set / prime sieve have real names, and the prime loops
    iterate in sorted order so the early ``break`` conditions are valid.
    """
    ret = set()
    # smallest cube + fourth power is 2**3 + 2**4 = 24, so the square is at
    # most limit - 24
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # sieve of Eratosthenes over the odd numbers, then add 2
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # the breaks below assume ascending order, which a set does not guarantee
    sorted_primes = sorted(primes)
    for prime1 in sorted_primes:
        square = prime1 * prime1
        for prime2 in sorted_primes:
            cube = prime2 * prime2 * prime2
            # smallest remaining fourth power is 2**4 = 16
            if square + cube >= limit - 16:
                break
            for prime3 in sorted_primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)


if __name__ == "__main__":
    print(f'{lowercase_() = }')
| 422
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for TransfoXLTokenizer: Moses tokenization, casing, added tokens."""

    # NOTE(review): the mixin reads these attribute names; the obfuscated file
    # had three colliding `_A` assignments — restored to the conventional names.
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self) -> None:
        """Write a tiny vocabulary file into the test's temp dir."""
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        """Return a tokenizer loaded from the temp vocab; lower-casing forced on."""
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Sample (raw, normalized) text pair used by the common mixin tests."""
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"])

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_full_tokenizer_moses_numbers(self):
        """Moses tokenization splits punctuation and marks in-number separators."""
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 149
| 0
|
def combination_util(arr, n, r, index, data, i):
    """
    Recursive helper: fill data[index:] with elements of arr[i:] and print each
    completed combination of size r.

    Fixes vs. the previous version: both functions were defined under one
    colliding name while the bodies called `combination_util` /
    `print_combination`, and the parameters were unreadable placeholders.
    """
    if index == r:
        # a full combination is in data[0:r] — print it
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    """Print every r-element combination of the first n elements of arr."""
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 708
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
# `pytestmark` is the magic module-level name pytest reads to apply a marker to
# every test in this module.
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    """inspect_dataset must copy the loading script (and no caches) to tmp_path."""
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    """inspect_metric must copy the metric script (and no caches) to tmp_path."""
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits", [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ], )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception", [
        ("paws", None, ValueError),
    ], )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    """Omitting the config for a multi-config dataset must raise."""
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected", [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ], )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config", [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ], )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits", [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ], )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception", [
        ("paws", None, ValueError),
    ], )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    """Omitting the config for a multi-config dataset must raise."""
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 405
| 0
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    """
    Build an XCLIPConfig for the given checkpoint name.

    NOTE(review): the attribute targets of the "large" overrides were lost in
    obfuscation; restored from the upstream X-CLIP conversion script — verify.
    """
    text_config = XCLIPTextConfig()

    # derive patch size from model name (two digits after the word "patch")
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    """
    Map an original X-CLIP checkpoint key to its Hugging Face name.

    Bug fixed: every replacement result was previously assigned to a throwaway
    variable while the function returned the *unmodified* input; each result is
    now assigned back to `name`.
    """
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
# Converts an original X-CLIP state dict to the HF layout: fused attention
# in_proj weights/biases are split into q/k/v chunks, all other keys are
# renamed via rename_key.
#
# NOTE(review): this block is badly degraded — the parameters are duplicated
# placeholders and every split chunk is assigned to a throwaway name instead
# of (presumably) being written back into `orig_state_dict` under the new
# q_proj/k_proj/v_proj key; restore the assignment targets from the upstream
# conversion script before use.
def lowerCamelCase__ ( _a , _a):
    for key in orig_state_dict.copy().keys():
        # pop the original entry; NOTE(review): `__SCREAMING_SNAKE_CASE` is
        # undefined — this was presumably `key`
        SCREAMING_SNAKE_CASE : List[Any] = orig_state_dict.pop(__SCREAMING_SNAKE_CASE)
        if "attn.in_proj" in key:
            SCREAMING_SNAKE_CASE : Any = key.split(".")
            if key.startswith("visual"):
                # vision tower: layer index sits at position 3 of the key
                SCREAMING_SNAKE_CASE : List[Any] = key_split[3]
                SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        # rows [0:dim] = query, [dim:2*dim] = key, [-dim:] = value
                        SCREAMING_SNAKE_CASE : List[str] = val[
                            :dim, :
                        ]
                        SCREAMING_SNAKE_CASE : int = val[
                            dim : dim * 2, :
                        ]
                        SCREAMING_SNAKE_CASE : Optional[int] = val[
                            -dim:, :
                        ]
                    else:
                        SCREAMING_SNAKE_CASE : Any = val[
                            :dim
                        ]
                        SCREAMING_SNAKE_CASE : Optional[int] = val[
                            dim : dim * 2
                        ]
                        SCREAMING_SNAKE_CASE : Any = val[
                            -dim:
                        ]
                else:
                    if "weight" in key:
                        SCREAMING_SNAKE_CASE : Any = val[
                            :dim, :
                        ]
                        SCREAMING_SNAKE_CASE : Dict = val[
                            dim : dim * 2, :
                        ]
                        SCREAMING_SNAKE_CASE : int = val[
                            -dim:, :
                        ]
                    else:
                        SCREAMING_SNAKE_CASE : int = val[:dim]
                        SCREAMING_SNAKE_CASE : Optional[int] = val[
                            dim : dim * 2
                        ]
                        SCREAMING_SNAKE_CASE : str = val[-dim:]
            elif key.startswith("mit"):
                # multiframe integration transformer: layer index at position 2
                SCREAMING_SNAKE_CASE : List[str] = key_split[2]
                SCREAMING_SNAKE_CASE : str = config.vision_config.mit_hidden_size
                if "weight" in key:
                    SCREAMING_SNAKE_CASE : List[Any] = val[:dim, :]
                    SCREAMING_SNAKE_CASE : List[str] = val[dim : dim * 2, :]
                    SCREAMING_SNAKE_CASE : Any = val[-dim:, :]
                else:
                    SCREAMING_SNAKE_CASE : Optional[int] = val[:dim]
                    SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2]
                    SCREAMING_SNAKE_CASE : int = val[-dim:]
            else:
                # text tower: layer index at position 2
                SCREAMING_SNAKE_CASE : Optional[int] = key_split[2]
                SCREAMING_SNAKE_CASE : int = config.text_config.hidden_size
                if "weight" in key:
                    SCREAMING_SNAKE_CASE : int = val[:dim, :]
                    SCREAMING_SNAKE_CASE : Optional[int] = val[
                        dim : dim * 2, :
                    ]
                    SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:, :]
                else:
                    SCREAMING_SNAKE_CASE : Optional[int] = val[:dim]
                    SCREAMING_SNAKE_CASE : List[Any] = val[
                        dim : dim * 2
                    ]
                    SCREAMING_SNAKE_CASE : Union[str, Any] = val[-dim:]
        else:
            # plain rename; projection matrices are stored transposed upstream
            SCREAMING_SNAKE_CASE : int = rename_key(__SCREAMING_SNAKE_CASE)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                SCREAMING_SNAKE_CASE : List[str] = val.T
            SCREAMING_SNAKE_CASE : Tuple = val
    return orig_state_dict
def prepare_video(num_frames):
    """
    Download the "eating spaghetti" sample clip with the requested number of
    frames (8, 16 or 32) and return it as a list of frames.

    Raises ValueError for unsupported frame counts (previously this fell
    through with an undefined filename).
    """
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    else:
        raise ValueError(f"Unsupported number of frames: {num_frames}")
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset", )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Download an original X-CLIP checkpoint, convert it to the Hugging Face
    format, verify the logits on a sample video, and optionally save and/or
    push the result.

    NOTE(review): local variable names were destroyed by obfuscation and were
    restored from the upstream conversion script — verify against it.
    """
    model_to_url = {
        # fully supervised kinetics-400 checkpoints
        "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
        "xclip-base-patch32-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
        ),
        "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
        "xclip-base-patch16-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
        ),
        "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
        "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
        # fully supervised kinetics-600 checkpoints
        "xclip-base-patch16-kinetics-600": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
        ),
        "xclip-base-patch16-kinetics-600-16-frames": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
        ),
        "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
        # few shot
        "xclip-base-patch16-hmdb-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
        ),
        "xclip-base-patch16-hmdb-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
        ),
        "xclip-base-patch16-hmdb-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
        ),
        "xclip-base-patch16-hmdb-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
        ),
        "xclip-base-patch16-ucf-2-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
        ),
        "xclip-base-patch16-ucf-4-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
        ),
        "xclip-base-patch16-ucf-8-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
        ),
        "xclip-base-patch16-ucf-16-shot": (
            "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
        ),
        # zero shot
        "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
    }
    checkpoint_url = model_to_url[model_name]

    # the checkpoint variant determines how many frames the model expects
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    # only the (buffer) position ids may be missing
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True)

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    # CLI entry point: convert one named checkpoint and optionally save/push it.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='xclip-base-patch32',
        type=str,
        help='Name of the model.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 25
|
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm v·v of *vector* (used by the rbf kernel below)."""
    return np.dot(vector, vector)


# Backward-compatible alias for the previous (obfuscated) name.
A_ = norm_squared
class SCREAMING_SNAKE_CASE_:
    """
    Support vector classifier trained by maximising Wolfe's dual with SciPy.

    Fixes vs. the previous version: the colliding `_snake_case` method names are
    restored to `fit`/`predict` and the two kernels, duplicate parameter names
    (a SyntaxError) are given distinct names, and the rbf kernel now uses the
    difference of its *two* arguments instead of `v - v` (always zero).
    """

    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError('rbf kernel requires gamma')
            if not isinstance(self.gamma, (float, int)):
                raise ValueError('gamma must be float or int')
            if not self.gamma > 0:
                raise ValueError('gamma must be > 0')
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            message = f"""Unknown kernel: {kernel}"""
            raise ValueError(message)

    def __linear(self, vector1, vector2) -> float:
        """Linear kernel: plain dot product."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1, vector2) -> float:
        """RBF kernel: exp(-gamma * ||v1 - v2||^2)."""
        difference = vector1 - vector2
        return np.exp(-(self.gamma * np.dot(difference, difference)))

    def fit(self, observations, classes) -> None:
        """Fit the classifier on *observations* with ±1 labels *classes*."""
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #     constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #     constraint: self.C >= ln >= 0
        #            and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate) -> float:
            """Negated dual objective, minimised over the multipliers."""
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j])
        self.offset = s / n

    def predict(self, observation) -> int:
        """Classify *observation* as +1 or -1 using the fitted multipliers."""
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 158
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
# Dummy sentencepiece-backed objects: each placeholder raises a helpful error
# via `requires_backends` unless the `sentencepiece` backend is installed.
# Fixes vs. the previous version: the metaclass was an undefined name (the
# imported `DummyObject` is intended), and the backend list attribute is
# restored to `_backends`, the name the dummy-object machinery reads.
# NOTE(review): upstream dummy-object files give every placeholder the real
# tokenizer's class name; here all eight share one (obfuscated) name, so only
# the last definition survives at import time — confirm the intended names.
class lowerCamelCase__(metaclass=DummyObject):
    """Placeholder requiring the `sentencepiece` backend."""

    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class lowerCamelCase__(metaclass=DummyObject):
    """Placeholder requiring the `sentencepiece` backend."""

    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class lowerCamelCase__(metaclass=DummyObject):
    """Placeholder requiring the `sentencepiece` backend."""

    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class lowerCamelCase__(metaclass=DummyObject):
    """Placeholder requiring the `sentencepiece` backend."""

    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class lowerCamelCase__(metaclass=DummyObject):
    """Placeholder requiring the `sentencepiece` backend."""

    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class lowerCamelCase__(metaclass=DummyObject):
    """Placeholder requiring the `sentencepiece` backend."""

    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class lowerCamelCase__(metaclass=DummyObject):
    """Placeholder requiring the `sentencepiece` backend."""

    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class lowerCamelCase__(metaclass=DummyObject):
    """Placeholder requiring the `sentencepiece` backend."""

    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : int , *__A : List[Any] , **__A : Dict ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : Dict , *__A : Union[str, Any] , **__A : Any ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : Dict , *__A : Tuple , **__A : Dict ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : List[str] , *__A : str , **__A : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : Any , *__A : Optional[int] , **__A : int ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : Dict , *__A : List[str] , **__A : List[str] ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : Any , *__A : Optional[int] , **__A : str ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : Union[str, Any] , *__A : Tuple , **__A : List[str] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : Dict , *__A : Tuple , **__A : Optional[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__A : List[Any] , **__A : Tuple ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__A : Optional[int] , **__A : str ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : str , *__A : Dict , **__A : Tuple ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : str , *__A : int , **__A : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__A : List[str] , **__A : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : List[Any] , *__A : Dict , **__A : Union[str, Any] ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : Dict , *__A : Dict , **__A : int ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : Dict , *__A : Any , **__A : Tuple ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : str , *__A : Optional[int] , **__A : Dict ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : Dict , *__A : Dict , **__A : Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : Any , *__A : Optional[Any] , **__A : int ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : List[Any] , *__A : Any , **__A : List[str] ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : List[Any] , *__A : Optional[Any] , **__A : Tuple ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class lowerCamelCase__ ( metaclass=SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A__ = ['''sentencepiece''']
def __init__( self : str , *__A : Union[str, Any] , **__A : Dict ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
| 721
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Lazy-import structure: submodule name -> public symbols it provides.
# NOTE(review): the original rebound a single obfuscated name in every branch
# and then passed an undefined `_import_structure` to `_LazyModule`, so
# importing the package raised NameError.  The dict is now built in place.
_import_structure = {
    """configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""image_processing_vivit"""] = ["""VivitImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_vivit"""] = [
        """VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """VivitModel""",
        """VivitPreTrainedModel""",
        """VivitForVideoClassification""",
    ]

if TYPE_CHECKING:
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules are only imported
    # on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 211
| 0
|
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase(metaclass=DummyObject):
    """Placeholder object that raises an informative error when the optional
    ``torch``/``transformers``/``onnx`` backends are not installed.

    NOTE(review): the original file defined this identical class 6 times under
    one name (only the last binding survives), used duplicated
    ``*_UpperCamelCase, **_UpperCamelCase`` parameter names (a SyntaxError),
    defined two shadowing classmethods, and referenced an undefined metaclass
    alias.  All of that is collapsed/fixed here; ``DummyObject`` is imported
    at the top of this file.
    """

    # Backends that must be installed before the real class can be used.
    _lowercase = ['''torch''', '''transformers''', '''onnx''']

    def __init__(self, *args, **kwargs):
        """Raise immediately: the real implementation needs the backends."""
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def __a(cls, *args, **kwargs):
        """Classmethod stub (e.g. `from_pretrained`): also raises."""
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 290
|
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int = 600851475143 ) -> int:
try:
lowercase : Any =int(__magic_name__ )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
lowercase : Optional[Any] =2
lowercase : Dict =0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
lowercase : Union[str, Any] =i
while n % i == 0:
lowercase : Optional[int] =n // i
i += 1
return int(__magic_name__ )
if __name__ == "__main__":
    # NOTE(review): the original called an undefined `solution()`; call the
    # function actually defined above instead.
    print(f'''{_lowerCAmelCase() = }''')
| 92
| 0
|
'''simple docstring'''
from datetime import datetime
import requests
def lowerCamelCase__(__lowerCamelCase: str) -> bytes:
    '''Download the raw bytes of an Instagram video/IGTV post.

    The downloadgram API is queried for the direct ``src`` URL of the video,
    which is then fetched and returned as bytes.

    NOTE(review): the original body read undefined names (`base_url`, `url`);
    restored with proper locals.
    '''
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_src = requests.get(base_url + __lowerCamelCase).json()[0]['urls'][0]['src']
    return requests.get(video_src).content
if __name__ == "__main__":
    # NOTE(review): `url` and `file_name` were never bound in the original
    # (both assignments targeted one obfuscated name), so the script crashed
    # with NameError; restored the intended bindings.
    url = input('Enter Video/IGTV url: ').strip()
    # Timestamped filename avoids collisions between downloads.
    file_name = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
    with open(file_name, 'wb') as fp:
        fp.write(lowerCamelCase__(url))
    print(F"""Done. Video saved to disk as {file_name}.""")
| 331
|
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter-name prefix -> HF UniSpeechSat attribute path
# ("*" is replaced with the encoder layer index during conversion).
# NOTE(review): `logger`, `MAPPING` and `TOP_LEVEL_KEYS` are referenced by the
# functions below but were bound to a single collapsed name; restored.
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'label_embs_concat': 'label_embeddings_concat',
    'mask_emb': 'masked_spec_embed',
    'spk_proj': 'speaker_proj',
}

# Mapped keys that live at the top level of the HF model; everything else is
# prefixed with "unispeech_sat.".
TOP_LEVEL_KEYS = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
    'label_embeddings_concat',
    'speaker_proj',
    'layer_norm_for_extract',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''Walk `key` ("a.b.c") down from `hf_pointer`, check the target tensor's
    shape against `value`, then copy `value` into the selected parameter.

    NOTE(review): the original def used five identically named parameters (a
    SyntaxError) and undefined locals; restored with the names the call site
    in `recursively_load_weights` expects.
    '''
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}" )
    # Copy into the matching parameter slot.
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights(fairseq_model, hf_model):
    '''Copy every weight of the fairseq checkpoint into the HF model,
    dispatching conv layers to `load_conv_layer` and everything else through
    the `MAPPING` table; weights that match nothing are collected and logged.

    NOTE(review): parameter and local names were collapsed by obfuscation;
    restored to the names the helper call sites expect.
    '''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights,
                hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.')[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        # Splice the encoder layer index into the mapped key.
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''Copy one feature-extractor conv/layer-norm weight into the HF model.

    `full_name` looks like "...conv_layers.<layer_id>.<type_id>...."; type 0
    is the conv itself, type 2 is its layer norm (only for group-norm layer 0
    or non-group-norm models).  Anything else is recorded as unused.

    NOTE(review): parameter/local names were collapsed by obfuscation;
    restored (the call site passes them positionally).
    '''
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    '''Copy/paste/tweak a fairseq UniSpeechSat checkpoint into the HF design,
    then save it to `pytorch_dump_folder_path`.

    NOTE(review): parameter/local names were collapsed by obfuscation;
    restored (the `__main__` block calls this function positionally).
    '''
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ''
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])} )
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowercase =parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 331
| 1
|
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
# Module-level logging state.
# NOTE(review): these five values were all bound to one collapsed name in the
# original while the functions below read `_lock`, `_default_handler`,
# `log_levels`, `_default_log_level` and `_tqdm_active`; restored.
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None  # installed lazily
log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True  # progress bars enabled by default
def _get_default_logging_level():
    """Return the default level, honoring the TRANSFORMERS_VERBOSITY env var."""
    env_level_str = os.getenv('TRANSFORMERS_VERBOSITY', None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
                f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
    return _default_log_level


def _get_library_name():
    # e.g. "transformers" for "transformers.utils.logging"
    return __name__.split('.')[0]


def _get_library_root_logger():
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger():
    """Install the library's stderr handler exactly once (thread-safe)."""
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger():
    """Undo `_configure_library_root_logger`."""
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    """Return the name -> logging level mapping."""
    return log_levels


def get_logger(name=None):
    """Return a logger under the library root, configuring it on first use."""
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity():
    """Return the effective level of the library root logger."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity):
    """Set the level of the library root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler():
    """Detach the library's default stderr handler."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler():
    """Re-attach the library's default stderr handler."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler):
    """Attach `handler` to the library root logger."""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler):
    """Detach `handler` from the library root logger."""
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation():
    """Stop library log records from propagating to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation():
    """Let library log records propagate to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format():
    """Use an explicit "[LEVEL|file:line] time >> msg" format on all handlers."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s')
        handler.setFormatter(formatter)


def reset_format():
    """Restore the default (no) formatter on all handlers."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """Like Logger.warning, but silenced by TRANSFORMERS_NO_ADVISORY_WARNINGS."""
    no_advisory_warnings = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS', False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Emit each distinct warning only once per process (cached on args)."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Drop-in stand-in for `tqdm` that does nothing; used when progress bars
    are globally disabled.

    NOTE(review): renamed from an obfuscated alias — the factory below
    instantiates ``EmptyTqdm``, which was otherwise undefined — and the
    iterator is now actually stored on the instance (the original assigned it
    to a throwaway local, so ``__iter__`` raised AttributeError).
    """

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        # Keep the wrapped iterable (first positional arg) for __iter__.
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, name):
        """Return a no-op for any tqdm method (update, set_description, ...)."""
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return
        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    """Factory yielding a real tqdm bar when progress bars are enabled, or an
    inert `EmptyTqdm` otherwise.

    NOTE(review): renamed from an obfuscated alias (the module instantiates
    ``_tqdm_cls()`` below) and the two shadowing method names restored to
    ``set_lock``/``get_lock``.
    """

    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
# Module-level tqdm shim instance.
tqdm = _tqdm_cls()


def is_progress_bar_enabled():
    """Return True if tqdm progress bars are globally enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Globally enable tqdm progress bars (here and in huggingface_hub)."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    """Globally disable tqdm progress bars (here and in huggingface_hub)."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 357
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def A_ ( __lowercase ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def is_chinese(word):
    """Return 1 if every character of `word` is a CJK ideograph, else 0.

    NOTE(review): restored the name the call sites use and the `word`/`cp`
    locals the body reads (the obfuscated version left them undefined).
    """
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens):
    """Return the multi-character, all-Chinese tokens from `tokens`
    (deduplicated; order follows set iteration).

    NOTE(review): restored the name the call site in `prepare_ref` uses.
    """
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    """Prefix "##" to BERT tokens that continue a whole Chinese word from
    `chinese_word_set` (greedy, longest-match-first).  Mutates and returns
    `bert_tokens`.

    NOTE(review): restored the locals the body reads (the obfuscated version
    collapsed them into one name) and the name the call site uses.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Try the longest candidate word first.
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    # Mark the continuation tokens of the matched word.
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    """For each input line, return the positions of BERT sub-tokens that are
    "##"-continuations of whole Chinese words (whole-word-masking refs).

    NOTE(review): restored names/locals collapsed by obfuscation; the
    `add_special_tokens`/`truncation` flags are assumed True as in the
    upstream script — TODO confirm.
    """
    ltp_res = []
    # Segment in batches of 100 lines, then keep only whole Chinese words.
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['input_ids'])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """Read ``args.file_name``, compute whole-word-masking reference ids and
    write them to ``args.save_path`` as one JSON list per line."""
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    # Drop empty / whitespace-only lines (avoid delimiter like '\u2029').
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    # The parser and parsed arguments must be bound to the names the calls
    # below actually read (`parser` / `args`).
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
    main(args)
| 357
| 1
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase(ProcessorMixin):
    """FLAVA processor wrapping a FLAVA image processor and a BERT tokenizer
    into a single processor.

    Args:
        image_processor: the ``FlavaImageProcessor`` handling images.
        tokenizer: the ``BertTokenizer``/``BertTokenizerFast`` handling text.
    """

    # Names consumed by ProcessorMixin machinery.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Backward compatibility: `feature_extractor` was renamed to `image_processor`.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Prepare text and/or images for the model.

        Text kwargs are forwarded to the tokenizer and image kwargs to the
        image processor; when both modalities are given, the two encodings are
        merged into one ``BatchEncoding``.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of tokenizer and image-processor input names, de-duplicated
        # while preserving order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 701
|
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
_A = 'src/transformers'
# Matches is_xxx_available()
_A = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_A = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_A = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_A = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_A = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_A = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_A = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_A = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_A = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_A = re.compile(R'^\s*try:')
# Catches a line with else:
_A = re.compile(R'^\s*else:')
def find_backend(line):
    """Find one (or multiple) backend in a code line of an init file.

    Returns ``None`` when the line is not an ``if not is_xxx_available()``
    guard, otherwise the backend names joined by ``"_and_"`` in sorted order.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an ``__init__.py`` and extract, per backend, the objects declared
    in ``_import_structure`` and the objects imported under ``TYPE_CHECKING``.

    Returns ``None`` for a traditional init (no ``_import_structure``),
    otherwise a tuple ``(import_dict_objects, type_hint_objects)`` of
    ``{backend_name_or_"none": [object, ...]}`` dicts.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init (``_import_structure`` vs the
    ``TYPE_CHECKING`` imports) and return a list of human-readable errors.

    An empty list means both halves agree.
    """

    def find_duplicates(seq):
        # Names registered more than once in the same backend bucket.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Walk the transformers source tree, parse every ``__init__.py`` and
    raise a ``ValueError`` listing any init whose two halves disagree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    # Prefix the first error with the offending file name.
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of transformers submodules (dotted paths relative to
    ``PATH_TO_TRANSFORMERS``), skipping private folders and empty leftovers."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level modules here; packages were added above.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
# Submodules that are deliberately not registered in `_import_structure`.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check that every transformers submodule is registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 538
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class lowercase_(unittest.TestCase):
    """Helper that holds the hyper-parameters for `YolosImageProcessor` tests
    and computes expected output sizes."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # By default use a shortest-edge/longest-edge resizing scheme.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to build the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after `YolosImageProcessor.resize`
        with a shortest-edge scheme; for batches, the max over the batch."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class lowercase_(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Integration and unit tests for `YolosImageProcessor`."""

    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings: one full pipeline, one that only pads.
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39_769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2_796, 0.3_138, 0.3_481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2_796, 0.3_138, 0.3_481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 443
|
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """A binary-tree node holding a float payload and optional children."""

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def __lowerCamelCase(root: TreeNode | None) -> bool:
    """Return True if the tree rooted at ``root`` is a binary search tree.

    Raises:
        ValueError: if any node is not a ``TreeNode`` or its data is not
            convertible to ``float``.
    """

    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        # Every node must be a TreeNode whose data coerces to float.
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        # A BST node's value must lie strictly within the open interval
        # imposed by its ancestors; the bounds tighten on each descent.
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 0
|
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    """Derive the DonutSwin encoder and MBart decoder configs from an original DonutModel.

    Args:
        model: the original ``DonutModel`` whose config and tokenizer are inspected.

    Returns:
        A ``(encoder_config, decoder_config)`` tuple.
    """
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        # The decoder is used as a standalone causal LM inside VisionEncoderDecoderModel.
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        # Vocab size is taken from the original decoder tokenizer.
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    """Translate an original Donut state-dict key into the HuggingFace naming scheme.

    Each ``replace`` must assign back to ``name`` so the renames accumulate.
    """
    if "encoder.model" in name:
        name = name.replace('encoder.model' , 'encoder' )
    if "decoder.model" in name:
        name = name.replace('decoder.model' , 'decoder' )
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if name.startswith('encoder' ):
        if "layers" in name:
            # HF nests the Swin layers one level deeper than the original.
            name = 'encoder.' + name
        if "attn.proj" in name:
            name = name.replace('attn.proj' , 'attention.output.dense' )
        if "attn" in name and "mask" not in name:
            name = name.replace('attn' , 'attention.self' )
        if "norm1" in name:
            name = name.replace('norm1' , 'layernorm_before' )
        if "norm2" in name:
            name = name.replace('norm2' , 'layernorm_after' )
        if "mlp.fc1" in name:
            name = name.replace('mlp.fc1' , 'intermediate.dense' )
        if "mlp.fc2" in name:
            name = name.replace('mlp.fc2' , 'output.dense' )
        if name == "encoder.norm.weight":
            name = 'encoder.layernorm.weight'
        if name == "encoder.norm.bias":
            name = 'encoder.layernorm.bias'
    return name
def convert_state_dict(orig_state_dict, model):
    """Map an original Donut state dict onto HuggingFace parameter names.

    Fused ``qkv`` projections are split into separate query/key/value tensors;
    all remaining keys are translated through :func:`rename_key`.

    Args:
        orig_state_dict: state dict of the original DonutModel (mutated in place).
        model: the target HF model, used only to read the attention head size.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split('.' )
            layer_num = int(key_split[3] )
            block_num = int(key_split[5] )
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original Donut checkpoint into a HF VisionEncoderDecoderModel.

    Args:
        model_name: hub name of the original Donut model to convert.
        pytorch_dump_folder_path: optional directory to save the converted model/processor.
        push_to_hub: whether to push the converted artifacts to the hub.
    """
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset('hf-internal-testing/example-documents')
    image = dataset['test'][0]['image'].convert('RGB')

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1])
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors='pt').pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        question = 'When is the coffee break?'
        task_prompt = task_prompt.replace('{user_input}', question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = '<s_rvlcdip>'
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = '<s_cord>'
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        # NOTE(review): prompt has no leading '<' — looks intentional upstream, confirm.
        task_prompt = 's_cord-v2>'
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = '<s_zhtrainticket>'
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = 'hello world'
    else:
        raise ValueError('Model name not supported')
    decoder_input_ids = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors='pt')[
        'input_ids'
    ]

    # verify patch embeddings
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, decoder_input_ids, None).logits
    logits = model(pixel_values, decoder_input_ids=decoder_input_ids).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(F'Saving model and processor to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub('nielsr/' + model_name.split('/')[-1], commit_message='Update model')
        processor.push_to_hub('nielsr/' + model_name.split('/')[-1], commit_message='Update model')
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 542
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to build GLPNImageProcessor test inputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=4_00,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """Return the kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for GLPNImageProcessor on PIL, numpy and torch inputs."""

    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size_divisor'))
        self.assertTrue(hasattr(image_processing, 'resample'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 542
| 1
|
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name: str):
    """Build a VideoMAEConfig matching the architecture encoded in *model_name*.

    Fine-tuned checkpoints additionally get label mappings loaded from the hub.
    """
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        # NOTE(review): pre-training checkpoints disable mean pooling — confirm attribute.
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 4_0_0
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 1_7_4
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
        with open(hf_hub_download(repo_id, filename, repo_type="dataset" ), "r" ) as f:
            idalabel = json.load(f)
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}

    return config
def set_architecture_configs(model_name, config):
    """Set the encoder/decoder sizes on *config* according to the model family.

    Args:
        model_name: checkpoint name containing "small", "base", "large" or "huge".
        config: a VideoMAEConfig-like object, mutated in place.

    Raises:
        ValueError: if *model_name* does not contain a known size keyword.
    """
    if "small" in model_name:
        config.hidden_size = 3_8_4
        config.intermediate_size = 1_5_3_6
        config.num_hidden_layers = 1_2
        config.num_attention_heads = 1_6
        config.decoder_num_hidden_layers = 1_2
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 1_9_2
        config.decoder_intermediate_size = 7_6_8
    elif "large" in model_name:
        config.hidden_size = 1_0_2_4
        config.intermediate_size = 4_0_9_6
        config.num_hidden_layers = 2_4
        config.num_attention_heads = 1_6
        config.decoder_num_hidden_layers = 1_2
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 5_1_2
        config.decoder_intermediate_size = 2_0_4_8
    elif "huge" in model_name:
        config.hidden_size = 1_2_8_0
        config.intermediate_size = 5_1_2_0
        config.num_hidden_layers = 3_2
        config.num_attention_heads = 1_6
        config.decoder_num_hidden_layers = 1_2
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 6_4_0
        config.decoder_intermediate_size = 2_5_6_0
    elif "base" not in model_name:
        # "base"-sized models keep the config defaults.
        raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def rename_key(name):
    """Translate an original VideoMAE state-dict key into the HF naming scheme.

    Each ``replace`` must assign back to ``name`` so the renames accumulate.
    """
    if "encoder." in name:
        name = name.replace("encoder." , "" )
    if "cls_token" in name:
        name = name.replace("cls_token" , "videomae.embeddings.cls_token" )
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks" , "decoder.decoder_layers" )
    if "blocks" in name:
        name = name.replace("blocks" , "videomae.encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name and "bias" not in name:
        name = name.replace("attn" , "attention.self" )
    if "attn" in name:
        name = name.replace("attn" , "attention.attention" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    if "decoder_embed" in name:
        name = name.replace("decoder_embed" , "decoder.decoder_embed" )
    if "decoder_norm" in name:
        name = name.replace("decoder_norm" , "decoder.decoder_norm" )
    if "decoder_pred" in name:
        name = name.replace("decoder_pred" , "decoder.decoder_pred" )
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight" , "videomae.layernorm.weight" )
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias" , "videomae.layernorm.bias" )
    if "head" in name and "decoder" not in name:
        name = name.replace("head" , "classifier" )

    return name
def convert_state_dict(orig_state_dict, config):
    """Map an original VideoMAE state dict onto HuggingFace parameter names.

    Fused ``qkv`` weights are split into query/key/value tensors; every other
    key is translated via :func:`rename_key`.

    Args:
        orig_state_dict: state dict of the original checkpoint (mutated in place).
        config: VideoMAEConfig, used for the encoder/decoder hidden sizes.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder." ):
            key = key.replace("encoder." , "" )

        if "qkv" in key:
            key_split = key.split("." )
            if key.startswith("decoder.blocks" ):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2] )
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1] )
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    """Download the spaghetti-eating sample video and return it as a list of frames."""
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    """Convert an original VideoMAE checkpoint (on Google Drive) to HuggingFace format.

    Args:
        checkpoint_url: direct download URL of the original PyTorch checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        model_name: name of the model (drives config and expected logits).
        push_to_hub: whether to push the converted model to the hub.
    """
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu" )
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt" )

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits

    model_names = [
        "videomae-small-finetuned-kinetics",
        "videomae-small-finetuned-ssv2",
        # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
        "videomae-base-short",
        "videomae-base-short-finetuned-kinetics",
        "videomae-base",
        "videomae-base-finetuned-kinetics",
        "videomae-large",
        "videomae-large-finetuned-kinetics",
        "videomae-huge-finetuned-kinetics",
        # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
        "videomae-base-short-ssv2",
        "videomae-base-short-finetuned-ssv2",
        "videomae-base-ssv2",
        "videomae-base-finetuned-ssv2",
    ]

    # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
    if model_name == "videomae-small-finetuned-kinetics":
        expected_shape = torch.Size([1, 4_0_0] )
        expected_slice = torch.tensor([-0.9291, -0.4061, -0.9307] )
    elif model_name == "videomae-small-finetuned-ssv2":
        expected_shape = torch.Size([1, 1_7_4] )
        expected_slice = torch.tensor([0.2671, -0.4689, -0.8235] )
    elif model_name == "videomae-base":
        expected_shape = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
    elif model_name == "videomae-base-short":
        expected_shape = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
        # we verified the loss both for normalized and unnormalized targets for this one
        expected_loss = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
    elif model_name == "videomae-large":
        expected_shape = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
    elif model_name == "videomae-large-finetuned-kinetics":
        expected_shape = torch.Size([1, 4_0_0] )
        expected_slice = torch.tensor([0.0771, 0.0011, -0.3625] )
    elif model_name == "videomae-huge-finetuned-kinetics":
        expected_shape = torch.Size([1, 4_0_0] )
        expected_slice = torch.tensor([0.2433, 0.1632, -0.4894] )
    elif model_name == "videomae-base-short-finetuned-kinetics":
        expected_shape = torch.Size([1, 4_0_0] )
        expected_slice = torch.tensor([0.6588, 0.0990, -0.2493] )
    elif model_name == "videomae-base-finetuned-kinetics":
        expected_shape = torch.Size([1, 4_0_0] )
        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421] )
    elif model_name == "videomae-base-short-ssv2":
        expected_shape = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
    elif model_name == "videomae-base-short-finetuned-ssv2":
        expected_shape = torch.Size([1, 1_7_4] )
        expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266] )
    elif model_name == "videomae-base-ssv2":
        expected_shape = torch.Size([1, 1_4_0_8, 1_5_3_6] )
        expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
    elif model_name == "videomae-base-finetuned-ssv2":
        expected_shape = torch.Size([1, 1_7_4] )
        expected_slice = torch.tensor([0.1961, -0.8337, -0.6389] )
    else:
        raise ValueError(f'''Model name not supported. Should be one of {model_names}''' )

    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 )
    else:
        print("Logits:" , logits[0, :3, :3] )
        assert torch.allclose(logits[0, :3, :3] , expected_slice , atol=1e-4 )
    print("Logits ok!" )

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss , expected_loss , atol=1e-4 )
        print("Loss ok!" )

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub..." )
        model.push_to_hub(model_name, organization="nielsr" )
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
        type=str,
        help=(
            'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
            ' download link.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/Users/nielsrogge/Documents/VideoMAE/Test',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 449
|
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
# Identifier of a repo that does not exist on the hub.
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = 'main'
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = 'aaaaaaa'
# This commit does not exist, so we should 404.

PINNED_SHA1 = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes
@contextlib.contextmanager
def context_en():
    """Context manager printing an English greeting on enter and goodbye on exit."""
    print("Welcome!" )
    yield
    print("Bye!" )
@contextlib.contextmanager
def context_fr():
    """Context manager printing a French greeting on enter and goodbye on exit."""
    print("Bonjour!" )
    yield
    print("Au revoir!" )
class TestImportMechanisms(unittest.TestCase):
    """Sanity checks that the transformers package can be imported dynamically."""

    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers" ) is not None
class GenericUtilTests(unittest.TestCase):
    """Tests for ContextManagers and find_labels from transformers.utils."""

    @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([] ):
            print("Transformers are awesome!" )
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" )

    @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()] ):
            print("Transformers are awesome!" )
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" )

    @unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()] ):
            print("Transformers are awesome!" )
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" )

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification) , ["labels"] )
        self.assertEqual(find_labels(BertForPreTraining) , ["labels", "next_sentence_label"] )
        self.assertEqual(find_labels(BertForQuestionAnswering) , ["start_positions", "end_positions"] )

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel) , ["labels"] )

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification) , ["labels"] )
        self.assertEqual(find_labels(TFBertForPreTraining) , ["labels", "next_sentence_label"] )
        self.assertEqual(find_labels(TFBertForQuestionAnswering) , ["start_positions", "end_positions"] )

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel) , ["labels"] )

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification) , [] )
        self.assertEqual(find_labels(FlaxBertForPreTraining) , [] )
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering) , [] )

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel) , [] )
| 449
| 1
|
def is_isogram(string: str) -> bool:
    """Return True if *string* is an isogram (no repeated letters, case-insensitive).

    Raises:
        ValueError: if the string contains non-alphabetic characters.
    """
    if not all(x.isalpha() for x in string):
        raise ValueError('''String must only contain alphabetic characters.''' )

    # Compare against the deduplicated set of lowercased letters.
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    # Interactive entry point: read a word and report whether it is an isogram.
    input_str = input('''Enter a string ''').strip()

    isogram = is_isogram(input_str)
    print(f"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 219
|
from scipy.stats import spearmanr
import datasets
# Human-readable description of the metric (referenced by the decorator below).
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.

Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.

The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
# Argument/return documentation shown by `datasets` for this metric.
_KWARGS_DESCRIPTION = '''
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {\'spearmanr\': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                     predictions=[10, 9, 2.5, 6, 4],
        ...                                     return_pvalue=True)
        >>> print(results[\'spearmanr\'])
        -0.7
        >>> print(round(results[\'spearmanr_pvalue\'], 2))
        0.19
'''
# BibTeX citation block (raw string: keeps LaTeX backslashes intact).
_CITATION = r'''\
@book{kokoska2000crc,
  title={CRC standard probability and statistics tables and formulae},
  author={Kokoska, Stephen and Zwillinger, Daniel},
  year={2000},
  publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
            Haberland, Matt and Reddy, Tyler and Cournapeau, David and
            Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
            Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
            Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
            Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
            Kern, Robert and Larson, Eric and Carey, C J and
            Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
            {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
            Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
            Harris, Charles R. and Archibald, Anne M. and
            Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
            {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
            Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    """`datasets` metric wrapping `scipy.stats.spearmanr`."""

    def _info(self):
        # Metric metadata: both inputs are flat sequences of floats.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''float''' ),
                    '''references''': datasets.Value('''float''' ),
                } ),
            reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # spearmanr returns (correlation, pvalue); the coefficient is symmetric in its arguments.
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 219
| 1
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowercase : Optional[int] =logging.get_logger(__name__)
class UpperCamelCase_ ( snake_case__ ):
    """Audio feature extractor that turns raw waveforms into (optionally
    fused) dB-scaled log-mel spectrogram features.

    Produces a dict with ``input_features`` (mel spectrograms) and
    ``is_longer`` (one bool per sample: whether the clip exceeded the
    configured maximum length and was truncated/fused).

    NOTE(review): the obfuscation broke this class — most method signatures
    repeat the parameter name ``lowerCamelCase`` (a SyntaxError in Python),
    several methods share the name ``__a`` and would shadow each other, and
    assignments bind ``lowerCamelCase_`` while later lines read the intended
    names (``top_db``, ``output``, ``mel``, ...). The bodies do call the
    intended private names (``_np_extract_fbank_features``,
    ``_random_mel_fusion``, ``_get_input_mel``), so the real structure is
    recoverable. Flagged for repair; documented as-is below.
    """

    # Keys this extractor emits in its output feature dict.
    _a : List[str] = ['input_features', 'is_longer']

    def __init__( self : int , lowerCamelCase : str=64 , lowerCamelCase : str=4_80_00 , lowerCamelCase : Any=4_80 , lowerCamelCase : List[str]=10 , lowerCamelCase : List[Any]=10_24 , lowerCamelCase : Union[str, Any]=0.0 , lowerCamelCase : Tuple=False , lowerCamelCase : float = 0 , lowerCamelCase : float = 1_40_00 , lowerCamelCase : int = None , lowerCamelCase : str = "fusion" , lowerCamelCase : str = "repeatpad" , **lowerCamelCase : Optional[int] , ):
        # Intended parameters (recovered from the body reads below):
        # feature_size, sampling_rate, hop_length, max_length_s,
        # fft_window_size, padding_value, return_attention_mask,
        # frequency_min, frequency_max, top_db, truncation, padding —
        # TODO confirm order against the upstream signature.
        super().__init__(
            feature_size=lowerCamelCase , sampling_rate=lowerCamelCase , padding_value=lowerCamelCase , return_attention_mask=lowerCamelCase , **lowerCamelCase , )
        lowerCamelCase_ : List[Any] = top_db
        lowerCamelCase_ : Union[str, Any] = truncation
        lowerCamelCase_ : str = padding
        lowerCamelCase_ : Union[str, Any] = fft_window_size
        # Number of frequency bins of a real FFT of size fft_window_size.
        lowerCamelCase_ : int = (fft_window_size >> 1) + 1
        lowerCamelCase_ : Tuple = hop_length
        lowerCamelCase_ : Optional[int] = max_length_s
        # Maximum sample count corresponding to max_length_s seconds of audio.
        lowerCamelCase_ : Optional[Any] = max_length_s * sampling_rate
        lowerCamelCase_ : int = sampling_rate
        lowerCamelCase_ : Union[str, Any] = frequency_min
        lowerCamelCase_ : str = frequency_max
        # Two mel filter banks are built: an HTK-scaled one and a
        # Slaney-normalized one; which is used depends on the truncation mode.
        lowerCamelCase_ : str = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase , min_frequency=lowerCamelCase , max_frequency=lowerCamelCase , sampling_rate=lowerCamelCase , norm=lowerCamelCase , mel_scale='htk' , )
        lowerCamelCase_ : Optional[Any] = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCamelCase , min_frequency=lowerCamelCase , max_frequency=lowerCamelCase , sampling_rate=lowerCamelCase , norm='slaney' , mel_scale='slaney' , )

    def __a ( self : Tuple ):
        """Serialize the extractor's state, dropping the large (re-derivable)
        mel filter banks. Presumably the mangled ``to_dict`` — the code below
        reads ``output`` even though the assignments bind a mangled name."""
        lowerCamelCase_ : str = copy.deepcopy(self.__dict__ )
        lowerCamelCase_ : List[str] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def __a ( self : List[Any] , lowerCamelCase : np.array , lowerCamelCase : Optional[np.array] = None ):
        """Compute a dB log-mel spectrogram of one waveform (time-major after
        the transpose). Presumably the mangled ``_np_extract_fbank_features``
        called elsewhere in this class."""
        lowerCamelCase_ : Dict = spectrogram(
            lowerCamelCase , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCamelCase , log_mel='dB' , )
        return log_mel_spectrogram.T

    def __a ( self : str , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : Tuple ):
        """Build a 4-channel "fusion" mel: one chunk sampled at random from
        the front, middle and back thirds of the spectrogram, plus the whole
        spectrogram shrunk to chunk size. Presumably the mangled
        ``_random_mel_fusion(mel, total_frames, chunk_frames)``."""
        # Candidate chunk start indices, split into front/middle/back thirds.
        lowerCamelCase_ : Tuple = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            lowerCamelCase_ : int = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            lowerCamelCase_ : List[Any] = [0]
        # randomly choose index for each part
        lowerCamelCase_ : Optional[Any] = np.random.choice(ranges[0] )
        lowerCamelCase_ : Tuple = np.random.choice(ranges[1] )
        lowerCamelCase_ : Any = np.random.choice(ranges[2] )
        lowerCamelCase_ : str = mel[idx_front : idx_front + chunk_frames, :]
        lowerCamelCase_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
        lowerCamelCase_ : List[str] = mel[idx_back : idx_back + chunk_frames, :]
        # Shrink the full spectrogram to chunk size via bilinear interpolation;
        # torch interpolate expects an (N, C, H, W) tensor, hence the two
        # inserted axes.
        lowerCamelCase_ : str = torch.tensor(mel[None, None, :] )
        lowerCamelCase_ : Union[str, Any] = torch.nn.functional.interpolate(
            lowerCamelCase , size=[chunk_frames, 64] , mode='bilinear' , align_corners=lowerCamelCase )
        lowerCamelCase_ : List[Any] = mel_shrink[0][0].numpy()
        # Stack into 4 channels: shrunk-whole + front/middle/back crops.
        lowerCamelCase_ : Dict = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
        return mel_fusion

    def __a ( self : Dict , lowerCamelCase : np.array , lowerCamelCase : int , lowerCamelCase : List[str] , lowerCamelCase : List[str] ):
        """Truncate or pad one waveform to ``max_length`` samples and convert
        it to mel features. Returns ``(input_mel, longer)`` where ``longer``
        records whether fusion-truncation actually happened. Presumably the
        mangled ``_get_input_mel(waveform, max_length, truncation, padding)``."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                lowerCamelCase_ : Dict = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                lowerCamelCase_ : Tuple = len(lowerCamelCase ) - max_length
                lowerCamelCase_ : int = np.random.randint(0 , overflow + 1 )
                lowerCamelCase_ : Dict = waveform[idx : idx + max_length]
                lowerCamelCase_ : int = self._np_extract_fbank_features(lowerCamelCase , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                lowerCamelCase_ : List[Any] = self._np_extract_fbank_features(lowerCamelCase , self.mel_filters )
                lowerCamelCase_ : str = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
                lowerCamelCase_ : Optional[Any] = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    lowerCamelCase_ : int = np.stack([mel, mel, mel, mel] , axis=0 )
                    lowerCamelCase_ : Any = False
                else:
                    lowerCamelCase_ : Optional[int] = self._random_mel_fusion(lowerCamelCase , lowerCamelCase , lowerCamelCase )
                    lowerCamelCase_ : Tuple = True
            else:
                raise NotImplementedError(F"data_truncating {truncation} not implemented" )
        else:
            lowerCamelCase_ : Dict = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    lowerCamelCase_ : Dict = int(max_length / len(lowerCamelCase ) )
                    lowerCamelCase_ : Union[str, Any] = np.stack(np.tile(lowerCamelCase , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    lowerCamelCase_ : int = int(max_length / len(lowerCamelCase ) )
                    lowerCamelCase_ : Optional[int] = np.stack(np.tile(lowerCamelCase , lowerCamelCase ) )
                # Zero-pad whatever is still missing up to max_length samples.
                lowerCamelCase_ : str = np.pad(lowerCamelCase , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
            if truncation == "fusion":
                lowerCamelCase_ : Dict = self._np_extract_fbank_features(lowerCamelCase , self.mel_filters )
                # Short clips get the same mel copied into all 4 fusion channels.
                lowerCamelCase_ : Any = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                lowerCamelCase_ : Optional[Any] = self._np_extract_fbank_features(lowerCamelCase , self.mel_filters_slaney )[None, :]
        return input_mel, longer

    def __call__( self : Tuple , lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase : str = None , lowerCamelCase : Optional[str] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , **lowerCamelCase : int , ):
        """Featurize one waveform or a batch of waveforms into a BatchFeature.

        Intended parameters (recovered from the body): raw_speech, truncation,
        padding, max_length, sampling_rate, return_tensors — TODO confirm.
        """
        lowerCamelCase_ : Any = truncation if truncation is not None else self.truncation
        # NOTE(review): this uses truthiness, so falsy-but-valid padding
        # values would silently fall back to self.padding.
        lowerCamelCase_ : int = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    F" was sampled with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        # A 2-D numpy array is interpreted as a batch of mono waveforms.
        lowerCamelCase_ : Tuple = isinstance(lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
        lowerCamelCase_ : List[Any] = is_batched_numpy or (
            isinstance(lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        # NOTE(review): `np.floataa` below is a mangled dtype name —
        # presumably np.float64 upstream; confirm before repairing.
        if is_batched:
            lowerCamelCase_ : Union[str, Any] = [np.asarray(lowerCamelCase , dtype=np.floataa ) for speech in raw_speech]
        elif not is_batched and not isinstance(lowerCamelCase , np.ndarray ):
            lowerCamelCase_ : Union[str, Any] = np.asarray(lowerCamelCase , dtype=np.floataa )
        elif isinstance(lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            lowerCamelCase_ : List[str] = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            lowerCamelCase_ : Optional[int] = [np.asarray(lowerCamelCase )]
        # convert to mel spectrogram, truncate and pad if needed.
        lowerCamelCase_ : Any = [
            self._get_input_mel(lowerCamelCase , max_length if max_length else self.nb_max_samples , lowerCamelCase , lowerCamelCase )
            for waveform in raw_speech
        ]
        # Split the (mel, longer) pairs into two parallel lists.
        lowerCamelCase_ : List[Any] = []
        lowerCamelCase_ : List[Any] = []
        for mel, longer in padded_inputs:
            input_mel.append(lowerCamelCase )
            is_longer.append(lowerCamelCase )
        if truncation == "fusion" and sum(lowerCamelCase ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            lowerCamelCase_ : Dict = np.random.randint(0 , len(lowerCamelCase ) )
            lowerCamelCase_ : str = True
        if isinstance(input_mel[0] , lowerCamelCase ):
            lowerCamelCase_ : int = [np.asarray(lowerCamelCase , dtype=np.floataa ) for feature in input_mel]
        # is_longer is a list of bool
        lowerCamelCase_ : Dict = [[longer] for longer in is_longer]
        lowerCamelCase_ : Optional[int] = {'input_features': input_mel, 'is_longer': is_longer}
        lowerCamelCase_ : str = BatchFeature(lowerCamelCase )
        if return_tensors is not None:
            lowerCamelCase_ : str = input_features.convert_to_tensors(lowerCamelCase )
        return input_features
| 364
|
# Project Euler 145: count "reversible" numbers n (no leading/trailing zero)
# such that every digit of n + reverse(n) is odd.
# The obfuscation had bound both digit lists to the same throwaway name while
# the code below reads EVEN_DIGITS / ODD_DIGITS, and the recursive calls /
# __main__ guard reference `reversible_numbers` / `solution`, which were
# never defined — restored here.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """Count reversible numbers of exactly ``length`` digits.

    Digits are chosen from the outside in; ``remaining_length`` is how many
    positions are still unassigned, ``remainder`` the carry of the pairwise
    digit sums so far, and ``digits`` the scratch array of chosen digits.
    """
    if remaining_length == 0:
        # No leading/trailing zeros allowed.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        # Verify every digit of n + reverse(n) is odd, propagating carries
        # from the outermost pair inward.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit is added to itself, so the carry into
        # it must be odd for the sum digit to be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # The paired digit must have the opposite parity of (carry + digit1)
        # so that the resulting sum digit is odd.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count reversible numbers with up to ``max_power`` digits (< 10**max_power)."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
| 364
| 1
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class a ( _UpperCAmelCase ,unittest.TestCase ):
    """Test-suite for ``BioGptTokenizer`` built on the shared tokenizer mixin.

    NOTE(review): obfuscation has mangled the hook names this class relies
    on — unittest only discovers methods named ``test_*`` and the mixin
    expects ``setUp`` plus class attributes such as ``tokenizer_class``.
    As written every method shares the name ``SCREAMING_SNAKE_CASE__``
    (later defs shadow earlier ones) and both class attributes share one
    name, so nothing here would actually run. Flagged for repair.
    """

    # Presumably intended: tokenizer_class = BioGptTokenizer — confirm.
    UpperCAmelCase__ : List[str] = BioGptTokenizer
    # Presumably intended: test_rust_tokenizer = False — confirm.
    UpperCAmelCase__ : Any = False

    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
        """Write a toy BPE vocab + merges file into tmpdir (intended ``setUp``).

        NOTE(review): assignments below bind mangled locals while later lines
        read the intended names (vocab / merges lists) — broken as written.
        """
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        __lowerCamelCase: Any = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """w</w>""",
            """r</w>""",
            """t</w>""",
            """lo""",
            """low""",
            """er</w>""",
            """low</w>""",
            """lowest</w>""",
            """newer</w>""",
            """wider</w>""",
            """<unk>""",
        ]
        # token -> id map over the toy vocabulary above
        __lowerCamelCase: Any = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
        # BPE merge rules with their ranks
        __lowerCamelCase: Optional[Any] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
        __lowerCamelCase: Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __lowerCamelCase: Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) )
        with open(self.merges_file , """w""" ) as fp:
            fp.write("""\n""".join(SCREAMING_SNAKE_CASE_ ) )

    def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ):
        """Return an (input, expected-output) text pair for the mixin
        (intended ``get_input_output_texts``)."""
        __lowerCamelCase: Dict = """lower newer"""
        __lowerCamelCase: Dict = """lower newer"""
        return input_text, output_text

    def SCREAMING_SNAKE_CASE__ ( self : int ):
        """Tokenize with the toy vocab and check tokens and ids
        (intended ``test_full_tokenizer``)."""
        __lowerCamelCase: Union[str, Any] = BioGptTokenizer(self.vocab_file , self.merges_file )
        __lowerCamelCase: str = """lower"""
        # "lower" should split into the merged "low" + suffix "er</w>".
        __lowerCamelCase: Any = ["""low""", """er</w>"""]
        __lowerCamelCase: Optional[int] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase: Dict = tokens + ["""<unk>"""]
        # Expected ids per the vocab order written in setUp.
        __lowerCamelCase: int = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
        """Check ``build_inputs_with_special_tokens`` prepends token id 2 for
        single and paired sequences on the hub checkpoint (intended
        ``test_sequence_builders``; requires network, hence @slow)."""
        __lowerCamelCase: Optional[int] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
        __lowerCamelCase: List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase: Optional[int] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase: Optional[int] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase: str = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 189
|
# Vigenere cipher. The obfuscation had broken this snippet: the alphabet was
# bound to a throwaway name although the code reads LETTERS, every function
# was defined under the same mangled name with duplicated parameter names
# (a SyntaxError), and the call sites already reference main /
# encrypt_message / decrypt_message / translate_message — restored here.
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    """Interactively encrypt or decrypt a message with a Vigenere cipher."""
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    # NOTE(review): as in the original, an answer that starts with neither
    # "e" nor "d" leaves `translated` unbound and raises NameError.
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    """Encrypt *message* with *key* using the Vigenere cipher."""
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    """Decrypt *message* with *key* using the Vigenere cipher."""
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of *message* by the matching key letter.

    ``mode`` is "encrypt" (add the key shift) or "decrypt" (subtract it).
    Letter case is preserved; non-alphabetic characters pass through
    unchanged and do not advance the key position.
    """
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            # Wrap around the alphabet.
            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # Advance (and wrap) the key position only for letters.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
| 189
| 1
|
'''simple docstring'''
# Largest prime smaller than 2**16, per the Adler-32 specification (RFC 1950).
# The constant was bound to a mangled name although the code reads MOD_ADLER.
MOD_ADLER = 65_521


def lowerCamelCase__(plain_text: str) -> int:
    """Compute the Adler-32 checksum of *plain_text*.

    The running sums ``a`` and ``b`` were never initialized in the mangled
    original, and the per-character update applied ``ord`` to the whole
    string — both restored here. The result packs B in the high 16 bits and
    A in the low 16 bits.

    NOTE(review): the function name is obfuscation residue; consider renaming
    to ``adler32`` once call sites are known.
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 517
|
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# Obfuscation had collapsed four distinct settings into one repeatedly
# rebound name; later code reads OUTPUT_DIR, and main() needs the label /
# image directories and flip type, so they are restored as separate
# constants. NOTE(review): the directory values were empty in the original —
# fill them in before running.
LABEL_DIR = ""  # directory holding the YOLO-format .txt annotation files
IMG_DIR = ""  # directory holding the matching .jpg images
OUTPUT_DIR = ""  # where flipped images/annotations are written
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    """Flip every dataset image (with its YOLO annotations) and write the
    results, tagged with a random 32-char code, into OUTPUT_DIR.

    Pure I/O driver: reads via get_dataset, transforms via
    update_image_and_anno, writes images with cv2 and annotations as text.
    The mangled original read undefined names (a__, paths, file_name, ...);
    they are restored from the call sites and body reads.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        # NOTE(review): the leading "/" in the output paths below is kept
        # from the original but looks suspicious — confirm the intended
        # destination before running.
        cva.imwrite(f"/{file_root}.jpg", image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect (image path, bounding boxes) pairs from YOLO-format labels.

    Each line of a ``*.txt`` label file is "<class> <x> <y> <w> <h>"; the
    matching image is assumed to be "<label_name>.jpg" in *img_dir*. Label
    files that yield no boxes are skipped entirely. The mangled original
    bound every local to a throwaway name while reading the intended ones
    (obj_lists, boxes, img_paths, labels); restored here.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            # Skip images with no annotations at all.
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip each image and mirror its YOLO bounding boxes.

    ``flip_type`` 1 flips horizontally (reflects x_center), 0 flips
    vertically (reflects y_center). Returns (flipped images, updated
    annotation lists, original image paths), index-aligned. The mangled
    original read undefined names (path_list, new_annos, img_annos, ...);
    restored here. NOTE(review): any other flip_type leaves ``new_img``
    unbound for the first image, as in the original.
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    """Return a random code of *number_char* lowercase letters and digits.

    The mangled original passed the length integer to ``random.choice``
    instead of the character pool; restored here. Raises ValueError for
    lengths <= 1 (the original used a bare ``assert``, which disappears
    under ``python -O``).
    """
    if number_char <= 1:
        raise ValueError("number_char must be greater than 1")
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 517
| 1
|
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A :
    """Config/input generator for TF LayoutLMv3 model tests.

    Builds random input ids, legal bounding boxes, pixel values, masks and
    labels, then runs one create-and-check helper per task head (base model,
    sequence classification, token classification, question answering).

    NOTE(review): the obfuscated signatures below repeat the parameter name
    ``_UpperCamelCase`` (a SyntaxError in Python), every method shares the
    name ``UpperCAmelCase__`` (later defs shadow earlier ones), and
    assignments bind ``_lowercase`` while later lines read the intended
    names (parent, bbox, model, ...). Flagged for repair; documented as-is.
    """

    def __init__( self : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : str=2 , _UpperCamelCase : Dict=3 , _UpperCamelCase : Dict=4 , _UpperCamelCase : int=2 , _UpperCamelCase : List[str]=7 , _UpperCamelCase : str=True , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : List[str]=True , _UpperCamelCase : Tuple=True , _UpperCamelCase : List[Any]=99 , _UpperCamelCase : List[str]=36 , _UpperCamelCase : str=2 , _UpperCamelCase : List[Any]=4 , _UpperCamelCase : Optional[Any]=37 , _UpperCamelCase : Optional[Any]="gelu" , _UpperCamelCase : Any=0.1 , _UpperCamelCase : Optional[int]=0.1 , _UpperCamelCase : List[Any]=512 , _UpperCamelCase : List[Any]=16 , _UpperCamelCase : Any=2 , _UpperCamelCase : List[str]=0.0_2 , _UpperCamelCase : Tuple=6 , _UpperCamelCase : Optional[Any]=6 , _UpperCamelCase : str=3 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : Dict=None , _UpperCamelCase : str=1_000 , ):
        # Intended parameters (recovered from the body reads below): parent,
        # batch_size, num_channels, image_size, patch_size, text_seq_length,
        # is_training, use_input_mask, use_token_type_ids, use_labels,
        # vocab_size, hidden_size, num_hidden_layers, num_attention_heads,
        # intermediate_size, hidden_act, hidden_dropout_prob,
        # attention_probs_dropout_prob, max_position_embeddings,
        # type_vocab_size, type_sequence_label_size, initializer_range,
        # coordinate_size, shape_size, num_labels, num_choices, scope,
        # range_bbox — TODO confirm order against the upstream tester.
        _lowercase: Tuple = parent
        _lowercase: Optional[Any] = batch_size
        _lowercase: Optional[Any] = num_channels
        _lowercase: Union[str, Any] = image_size
        _lowercase: Union[str, Any] = patch_size
        _lowercase: Union[str, Any] = is_training
        _lowercase: Optional[int] = use_input_mask
        _lowercase: Tuple = use_token_type_ids
        _lowercase: Any = use_labels
        _lowercase: List[Any] = vocab_size
        _lowercase: Tuple = hidden_size
        _lowercase: Union[str, Any] = num_hidden_layers
        _lowercase: List[str] = num_attention_heads
        _lowercase: Union[str, Any] = intermediate_size
        _lowercase: str = hidden_act
        _lowercase: List[Any] = hidden_dropout_prob
        _lowercase: Tuple = attention_probs_dropout_prob
        _lowercase: int = max_position_embeddings
        _lowercase: Optional[Any] = type_vocab_size
        _lowercase: Optional[Any] = type_sequence_label_size
        _lowercase: Dict = initializer_range
        _lowercase: Tuple = coordinate_size
        _lowercase: List[str] = shape_size
        _lowercase: int = num_labels
        _lowercase: Dict = num_choices
        _lowercase: Optional[Any] = scope
        _lowercase: str = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        _lowercase: Union[str, Any] = text_seq_length
        _lowercase: Tuple = (image_size // patch_size) ** 2 + 1
        _lowercase: Optional[Any] = self.text_seq_length + self.image_seq_length

    def UpperCAmelCase__ ( self : Optional[int]):
        """Build random config + inputs (ids, legal bboxes, pixel values,
        masks, labels). Presumably the mangled ``prepare_config_and_inputs``."""
        _lowercase: int = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
        _lowercase: List[str] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
        _lowercase: int = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                # Swap coordinates so that x1 <= x2 and y1 <= y2.
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    _lowercase: Tuple = bbox[i, j, 3]
                    _lowercase: Tuple = bbox[i, j, 1]
                    _lowercase: List[str] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    _lowercase: int = bbox[i, j, 2]
                    _lowercase: Dict = bbox[i, j, 0]
                    _lowercase: Dict = tmp_coordinate
        _lowercase: Union[str, Any] = tf.constant(_UpperCamelCase)
        _lowercase: Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        _lowercase: List[str] = None
        if self.use_input_mask:
            _lowercase: Optional[Any] = random_attention_mask([self.batch_size, self.text_seq_length])
        _lowercase: List[Any] = None
        if self.use_token_type_ids:
            _lowercase: Tuple = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
        _lowercase: List[str] = None
        _lowercase: Optional[int] = None
        if self.use_labels:
            _lowercase: Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            _lowercase: Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
        _lowercase: Any = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def UpperCAmelCase__ ( self : str , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Tuple):
        """Run the base model on text+image, text-only and image-only inputs
        and check hidden-state shapes. Presumably ``create_and_check_model``."""
        _lowercase: List[str] = TFLayoutLMvaModel(config=_UpperCamelCase)
        # text + image
        _lowercase: Union[str, Any] = model(_UpperCamelCase , pixel_values=_UpperCamelCase , training=_UpperCamelCase)
        _lowercase: Optional[Any] = model(
            _UpperCamelCase , bbox=_UpperCamelCase , pixel_values=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , training=_UpperCamelCase , )
        _lowercase: Optional[Any] = model(_UpperCamelCase , bbox=_UpperCamelCase , pixel_values=_UpperCamelCase , training=_UpperCamelCase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        _lowercase: int = model(_UpperCamelCase , training=_UpperCamelCase)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        _lowercase: List[Any] = model({"pixel_values": pixel_values} , training=_UpperCamelCase)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))

    def UpperCAmelCase__ ( self : Union[str, Any] , _UpperCamelCase : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : int):
        """Check logits shape of the sequence-classification head."""
        _lowercase: Optional[Any] = self.num_labels
        _lowercase: int = TFLayoutLMvaForSequenceClassification(config=_UpperCamelCase)
        _lowercase: str = model(
            _UpperCamelCase , bbox=_UpperCamelCase , pixel_values=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , training=_UpperCamelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def UpperCAmelCase__ ( self : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : Dict):
        """Check logits shape of the token-classification head (per text token)."""
        _lowercase: Any = self.num_labels
        _lowercase: str = TFLayoutLMvaForTokenClassification(config=_UpperCamelCase)
        _lowercase: Optional[Any] = model(
            _UpperCamelCase , bbox=_UpperCamelCase , pixel_values=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , training=_UpperCamelCase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))

    def UpperCAmelCase__ ( self : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any]):
        """Check start/end logits shapes of the question-answering head."""
        _lowercase: Any = 2
        _lowercase: str = TFLayoutLMvaForQuestionAnswering(config=_UpperCamelCase)
        _lowercase: Any = model(
            _UpperCamelCase , bbox=_UpperCamelCase , pixel_values=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , training=_UpperCamelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))

    def UpperCAmelCase__ ( self : Union[str, Any]):
        """Repackage prepared inputs into the dict shape expected by the
        common model-test mixin. NOTE(review): the parenthesized tuple target
        with a ``: str`` annotation below is not valid Python (annotated
        assignments cannot have tuple targets) — mangled unpacking."""
        _lowercase: int = self.prepare_config_and_inputs()
        ((_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase) , (_lowercase)): str = config_and_inputs
        _lowercase: Dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class A ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase : List[Any] = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCamelCase : int = False
lowerCamelCase : str = False
lowerCamelCase : List[Any] = False
def UpperCAmelCase__ ( self : Any , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]):
return True
def UpperCAmelCase__ ( self : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : List[str]=False):
_lowercase: List[Any] = copy.deepcopy(_UpperCamelCase)
if model_class in get_values(_UpperCamelCase):
_lowercase: Optional[int] = {
k: tf.tile(tf.expand_dims(_UpperCamelCase , 1) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
if isinstance(_UpperCamelCase , tf.Tensor) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_UpperCamelCase):
_lowercase: Optional[int] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa)
elif model_class in get_values(_UpperCamelCase):
_lowercase: Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
_lowercase: Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
elif model_class in get_values(_UpperCamelCase):
_lowercase: Any = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
elif model_class in get_values(_UpperCamelCase):
_lowercase: str = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa)
return inputs_dict
def UpperCAmelCase__ ( self : Tuple):
_lowercase: List[Any] = TFLayoutLMvaModelTester(self)
_lowercase: Union[str, Any] = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37)
def UpperCAmelCase__ ( self : Dict):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Union[str, Any]):
_lowercase , _lowercase: Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase: Tuple = model_class(_UpperCamelCase)
if getattr(_UpperCamelCase , "hf_compute_loss" , _UpperCamelCase):
# The number of elements in the loss should be the same as the number of elements in the label
_lowercase: List[str] = self._prepare_for_class(inputs_dict.copy() , _UpperCamelCase , return_labels=_UpperCamelCase)
_lowercase: Tuple = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_UpperCamelCase)[0]
]
_lowercase: List[Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
_lowercase: List[Any] = self._prepare_for_class(inputs_dict.copy() , _UpperCamelCase , return_labels=_UpperCamelCase)
_lowercase: Optional[Any] = prepared_for_class.pop("input_ids")
_lowercase: List[Any] = model(_UpperCamelCase , **_UpperCamelCase)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
# Test that model correctly compute the loss when we mask some positions
_lowercase: List[Any] = self._prepare_for_class(inputs_dict.copy() , _UpperCamelCase , return_labels=_UpperCamelCase)
_lowercase: str = prepared_for_class.pop("input_ids")
if "labels" in prepared_for_class:
_lowercase: List[str] = prepared_for_class["labels"].numpy()
if len(labels.shape) > 1 and labels.shape[1] != 1:
_lowercase: int = -100
_lowercase: int = tf.convert_to_tensor(_UpperCamelCase)
_lowercase: List[str] = model(_UpperCamelCase , **_UpperCamelCase)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
self.assertTrue(not np.any(np.isnan(loss.numpy())))
# Test that model correctly compute the loss with a dict
_lowercase: Any = self._prepare_for_class(inputs_dict.copy() , _UpperCamelCase , return_labels=_UpperCamelCase)
_lowercase: Tuple = model(_UpperCamelCase)[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
# Test that model correctly compute the loss with a tuple
_lowercase: Dict = self._prepare_for_class(inputs_dict.copy() , _UpperCamelCase , return_labels=_UpperCamelCase)
# Get keys that were added with the _prepare_for_class function
_lowercase: Optional[Any] = prepared_for_class.keys() - inputs_dict.keys()
_lowercase: Any = inspect.signature(model.call).parameters
_lowercase: Dict = list(signature.keys())
# Create a dictionary holding the location of the tensors in the tuple
_lowercase: Union[str, Any] = {0: "input_ids"}
for label_key in label_keys:
_lowercase: Dict = signature_names.index(_UpperCamelCase)
_lowercase: Any = label_key
_lowercase: List[str] = sorted(tuple_index_mapping.items())
# Initialize a list with their default values, update the values and convert to a tuple
_lowercase: str = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default)
for index, value in sorted_tuple_index_mapping:
_lowercase: Union[str, Any] = prepared_for_class[value]
_lowercase: List[Any] = tuple(_UpperCamelCase)
# Send to model
_lowercase: Dict = model(tuple_input[:-1])[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
def test_model(self):
    """Run the basic forward-pass check on a freshly prepared config + inputs."""
    # NOTE(review): assumes the tester returns
    # (config, input_ids, bbox, pixel_values, token_type_ids, input_mask,
    #  sequence_labels, token_labels) -- confirm against the model tester.
    (
        config,
        input_ids,
        bbox,
        pixel_values,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ) = self.model_tester.prepare_config_and_inputs()
    # Only the first six values are needed here; the label tensors are unused.
    self.model_tester.create_and_check_model(
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask
    )
def test_model_various_embeddings(self):
    """Run the basic model check for each supported position-embedding type."""
    # NOTE(review): assumes the tester returns
    # (config, input_ids, bbox, pixel_values, token_type_ids, input_mask,
    #  sequence_labels, token_labels) -- confirm against the model tester.
    (
        config,
        input_ids,
        bbox,
        pixel_values,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ) = self.model_tester.prepare_config_and_inputs()
    for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
        config.position_embedding_type = embedding_type
        self.model_tester.create_and_check_model(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask
        )
def test_for_sequence_classification(self):
    """Check the sequence-classification head on prepared inputs."""
    # NOTE(review): assumes the tester returns
    # (config, input_ids, bbox, pixel_values, token_type_ids, input_mask,
    #  sequence_labels, token_labels) -- confirm against the model tester.
    (
        config,
        input_ids,
        bbox,
        pixel_values,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ) = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_sequence_classification(
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    )
def test_for_token_classification(self):
    """Check the token-classification head on prepared inputs."""
    # NOTE(review): assumes the tester returns
    # (config, input_ids, bbox, pixel_values, token_type_ids, input_mask,
    #  sequence_labels, token_labels) -- confirm against the model tester.
    (
        config,
        input_ids,
        bbox,
        pixel_values,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ) = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_token_classification(
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    )
def test_for_question_answering(self):
    """Check the question-answering head on prepared inputs."""
    # NOTE(review): assumes the tester returns
    # (config, input_ids, bbox, pixel_values, token_type_ids, input_mask,
    #  sequence_labels, token_labels) -- confirm against the model tester.
    (
        config,
        input_ids,
        bbox,
        pixel_values,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ) = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_question_answering(
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    )
@slow
def test_model_from_pretrained(self):
    """Smoke-test loading the first published pretrained checkpoint."""
    for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        model = TFLayoutLMvaModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
def prepare_img():
    """Load the fixture COCO image used by the integration tests below.

    Named `prepare_img` because that is the name the integration test calls;
    the previous version also returned an undefined `image` variable.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class A(unittest.TestCase):
    """Integration tests for the TF LayoutLMv3 model."""

    @cached_property
    def default_image_processor(self):
        # Named to match the `self.default_image_processor` access below.
        # OCR is disabled because the test supplies its own token ids/boxes.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        """Compare the base model's hidden states against reference values."""
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values
        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
        # forward pass in inference mode (training=False disables dropout)
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        # verify the last hidden state
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 206
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger.
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
# NOTE(review): this rebinding clobbers the logger above -- both globals share
# one degenerate name. Presumably the map below was meant to be a separate
# "pretrained config archive" constant; confirm before relying on either.
_SCREAMING_SNAKE_CASE : Optional[int] = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class A(lowerCamelCase_):
    """Configuration for a data2vec-text model (RoBERTa-like hyper-parameters).

    The previous version declared every parameter with the same name, which is
    a SyntaxError (duplicate argument); the real names are restored from the
    attribute assignments in the body.
    """

    # NOTE(review): presumably serves as PretrainedConfig.model_type -- confirm.
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A(lowerCamelCase_):
    """ONNX export configuration for data2vec-text."""

    @property
    def UpperCAmelCase__(self):
        """Return the ordered input-name -> dynamic-axes mapping for export."""
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            axes = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            axes = {0: "batch", 1: "sequence"}
        return OrderedDict((name, axes) for name in ("input_ids", "attention_mask"))
| 206
| 1
|
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
# The previous version bound both values to one name, so the logger was
# immediately clobbered by the AUTOTUNE constant.
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    """Parse command-line arguments for TPU masked-language-model training.

    Returns:
        argparse.Namespace with model/tokenizer ids, TPU connection options,
        dataset paths, and training hyper-parameters.
    """
    # The previous version passed an undefined name as every `type=`;
    # str/int/float are restored from each argument's default value.
    parser = argparse.ArgumentParser(description="""Train a masked language model on TPU.""")
    parser.add_argument(
        """--pretrained_model_config""", type=str, default="""roberta-base""", help="""The model config to use. Note that we don't copy the model's weights, only the config!""", )
    parser.add_argument(
        """--tokenizer""", type=str, default="""unigram-tokenizer-wikitext""", help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""", )
    parser.add_argument(
        """--per_replica_batch_size""", type=int, default=8, help="""Batch size per TPU core.""", )
    parser.add_argument(
        """--no_tpu""", action="""store_true""", help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""", )
    parser.add_argument(
        """--tpu_name""", type=str, help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""", default="""local""", )
    parser.add_argument(
        """--tpu_zone""", type=str, help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""", )
    parser.add_argument(
        """--gcp_project""", type=str, help="""Google cloud project name. Only used for non-Colab TPU nodes.""")
    parser.add_argument(
        """--bfloat16""", action="""store_true""", help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""", )
    parser.add_argument(
        """--train_dataset""", type=str, help="""Path to training dataset to load. If the path begins with `gs://`"""
        """ then the dataset will be loaded from a Google Cloud Storage bucket.""", )
    parser.add_argument(
        """--shuffle_buffer_size""", type=int, default=2**18, help="""Size of the shuffle buffer (in samples)""", )
    parser.add_argument(
        """--eval_dataset""", type=str, help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
        """ then the dataset will be loaded from a Google Cloud Storage bucket.""", )
    parser.add_argument(
        """--num_epochs""", type=int, default=1, help="""Number of epochs to train for.""", )
    parser.add_argument(
        """--learning_rate""", type=float, default=1E-4, help="""Learning rate to use for training.""", )
    parser.add_argument(
        """--weight_decay_rate""", type=float, default=1E-3, help="""Weight decay rate to use for training.""", )
    parser.add_argument(
        """--max_length""", type=int, default=512, help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""", )
    parser.add_argument(
        """--mlm_probability""", type=float, default=0.15, help="""Fraction of tokens to mask during training.""", )
    parser.add_argument("""--output_dir""", type=str, required=True, help="""Path to save model checkpoints to.""")
    parser.add_argument("""--hub_model_id""", type=str, help="""Model ID to upload to on the Hugging Face Hub.""")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    """Resolve, connect to, and initialize the TPU described by `args`.

    Named `initialize_tpu` because that is the name `main` calls.

    Returns:
        The TPUClusterResolver for the initialized TPU system.

    Raises:
        RuntimeError: if no TPU could be resolved from the given options.
    """
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            """Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
            """--gcp_project. When running on a TPU VM, use --tpu_name local."""
        )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    """Sum the per-shard sample counts encoded in tfrecord filenames.

    Shard names end in ``-<shard>-<count>.tfrecord``; the trailing number is
    the number of samples in that shard. Named `count_samples` because that is
    the name the rest of the script calls.
    """
    num_samples = 0
    for file in file_list:
        filename = file.split("""/""")[-1]
        sample_count = re.search(R"""-\d+-(\d+)\.tfrecord""", filename).group(1)
        num_samples += int(sample_count)
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    """Build the tf.data input pipeline over a list of tfrecord shard paths.

    The previous version declared every parameter with one duplicated name
    (a SyntaxError) and read `args.shuffle_buffer_size` instead of the
    `shuffle_buffer_size` parameter it was given.

    Args:
        records: list of tfrecord file paths.
        decode_fn: parses one serialized example into tensors.
        mask_fn: applies MLM masking to a decoded batch.
        batch_size: global batch size.
        shuffle: whether to shuffle shards and samples (training mode).
        shuffle_buffer_size: required when `shuffle` is True.
    """
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    # NOTE(review): AUTOTUNE restored for the degenerate parallelism argument;
    # confirm against the original pipeline settings.
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=tf.data.AUTOTUNE)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=tf.data.AUTOTUNE)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    # drop_remainder keeps batch shapes static, which TPUs require.
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=tf.data.AUTOTUNE)
    dataset = dataset.prefetch(tf.data.AUTOTUNE)
    return dataset
def main(args):
    """Train a masked language model on TPU (or a single GPU with --no_tpu).

    The previous version bound every local to one rebinding name (so each
    value clobbered the last), took an unnamed parameter while reading a
    global `args`, and tested `args.bfloataa` instead of the `--bfloat16`
    flag defined in parse_args.
    """
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="""/gpu:0""")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    # Size the model's vocab from the tokenizer rather than the config default.
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, """*.tfrecord"""))
    if not training_records:
        raise ValueError(F'''No .tfrecord files found in {args.train_dataset}.''')
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, """*.tfrecord"""))
    if not eval_records:
        raise ValueError(F'''No .tfrecord files found in {args.eval_dataset}.''')

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["""accuracy"""])

        def decode_fn(example):
            # Schema must match prepare_tfrecord_shards.py.
            features = {
                """input_ids""": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
                """attention_mask""": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            }
            return tf.io.parse_single_example(example, features)

        # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
        # use their methods in our data pipeline.
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="""tf"""
        )

        def mask_with_collator(batch):
            # TF really needs an isin() function
            special_tokens_mask = (
                ~tf.cast(batch["""attention_mask"""], tf.bool)
                | (batch["""input_ids"""] == tokenizer.cls_token_id)
                | (batch["""input_ids"""] == tokenizer.sep_token_id)
            )
            batch["""input_ids"""], batch["""labels"""] = data_collator.tf_mask_tokens(
                batch["""input_ids"""],
                vocab_size=len(tokenizer),
                mask_token_id=tokenizer.mask_token_id,
                special_tokens_mask=special_tokens_mask,
            )
            return batch

        batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

        train_dataset = prepare_dataset(
            training_records,
            decode_fn=decode_fn,
            mask_fn=mask_with_collator,
            batch_size=batch_size,
            shuffle=True,
            shuffle_buffer_size=args.shuffle_buffer_size,
        )
        eval_dataset = prepare_dataset(
            eval_records,
            decode_fn=decode_fn,
            mask_fn=mask_with_collator,
            batch_size=batch_size,
            shuffle=False,
        )

        callbacks = []
        if args.hub_model_id:
            callbacks.append(
                PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
            )

        model.fit(
            train_dataset,
            validation_data=eval_dataset,
            epochs=args.num_epochs,
            callbacks=callbacks,
        )
        model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    # The previous version discarded parse_args()'s result into a throwaway
    # name and then passed an undefined `args` to main.
    args = parse_args()
    main(args)
| 341
|
"""simple docstring"""
def perfect(number):
    """Return True if `number` equals the sum of its proper divisors (6, 28, ...).

    Named `perfect` because that is the name the __main__ block calls.
    """
    # Proper divisors of n are never larger than n // 2.
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
    print("""Program to check whether a number is a Perfect number or not...""")
    number = int(input("""Enter number: """).strip())
    # Single-quoted inner strings keep this f-string valid on Python < 3.12
    # (nested same-quote f-strings are a SyntaxError there).
    print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 341
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
# These three globals are read by the loader functions below as `logger`,
# `MAPPING`, and `TOP_LEVEL_KEYS`; the previous version bound them to
# throwaway names, leaving all three undefined at their use sites.
logger = logging.get_logger(__name__)

# fairseq parameter-path fragment -> HF Wav2Vec2 module path.
# A "*" is replaced with the encoder layer index during loading.
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'adapter_layer': 'encoder.layers.*.adapter_layer',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
    'pooling_layer.linear': 'projector',
    'pooling_layer.projection': 'classifier',
}
# Mapped keys that live at the top level of the HF model (no "wav2vec2."
# prefix is prepended for these).
TOP_LEVEL_KEYS = [
    'lm_head',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
    'projector',
    'classifier',
]
def read_txt_into_dict(filename):
    """Read a label file into a {line_number: first_word} dict.

    The previous version bound the result dict, each line, and each split to
    one name, so the dict was clobbered on the first iteration. Named
    `read_txt_into_dict` because that is the name the conversion entry point
    calls. Blank lines are skipped but still consume a line number.
    """
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy one fairseq tensor into the HF module addressed by dotted `key`.

    The previous version declared all five parameters with one duplicated
    name (a SyntaxError); names are restored from the body's usage. Named
    `set_recursively` because that is the name load_wavaveca_layer calls.

    Raises:
        ValueError: if the fairseq tensor's shape does not match the target.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # A full_name ending in a PARAM_MAPPING key is stored as a plain parameter.
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}"""
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    """Record one fairseq tensor under its HF key in the flat dict `hf_dict`.

    The previous version declared all five parameters with one duplicated
    name (a SyntaxError) and dropped the final store into `hf_dict`. Named
    `rename_dict` because that is the name load_wavaveca_layer calls.
    """
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    # lm_head tensors are kept whole; everything else drops the leading dim.
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
# Adapter-parameter suffix -> HF parameter path; read as PARAM_MAPPING by
# set_recursively and rename_dict (the previous throwaway name left it
# undefined at those use sites).
PARAM_MAPPING = {
    'W_a': 'linear_1.weight',
    'W_b': 'linear_2.weight',
    'b_a': 'linear_1.bias',
    'b_b': 'linear_2.bias',
    'ln_W': 'norm.weight',
    'ln_b': 'norm.bias',
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    """Map one fairseq tensor onto the HF model (or flat dict); return True if mapped.

    The previous version had duplicated parameter names (a SyntaxError) and
    split `name` on an undefined global instead of the matched `key`. Named
    `load_wavaveca_layer` because that is the name recursively_load_weights
    calls.
    """
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                # The layer index sits just before the matched key fragment.
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy all fairseq weights into `hf_model`, warning about any that don't map.

    The previous version collapsed all locals into one rebinding name. Named
    `recursively_load_weights` because that is the name the conversion entry
    point calls.
    """
    # NOTE(review): `is_headless` is accepted but unused here, matching the
    # original call signature -- confirm whether the feature-extractor lookup
    # should depend on it.
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one feature-extractor conv/layer-norm tensor into the HF model.

    The previous version had duplicated parameter names (a SyntaxError) and
    logged "weight" in the layer-norm bias branch. Named `load_conv_layer`
    because that is the name recursively_load_weights calls.

    Raises:
        ValueError: if a tensor's shape does not match the target parameter.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            # was logged as "weight"; this branch sets the layer-norm bias
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Convert a fairseq wav2vec2 checkpoint to a Hugging Face model directory.

    The previous version declared six parameters with one duplicated name
    (a SyntaxError) and collapsed all locals; names are restored from the
    body's usage. Named `convert_wavaveca_checkpoint` because that is the
    name the __main__ block calls.
    """
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # The previous version bound the parser, the parsed args, and the
    # is_finetuned flag to throwaway names, leaving `parser`, `args`, and
    # `is_finetuned` undefined where they were used.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    parser.add_argument(
        '--is_seq_class',
        action='store_true',
        help='Whether the model to convert is a fine-tuned sequence classification model or not',
    )
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
| 717
|
# Notebook cell inserted at the top of generated notebooks to install deps.
# The previous version bound this to a throwaway name, so the
# INSTALL_CONTENT reference below was undefined.
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
# First cells of every generated notebook.
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
# Placeholder substitutions applied when formatting doc code samples.
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 484
| 0
|
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def _A():
    """Check that Prim's MST on a fixed 9-node graph contains each expected edge.

    The previous version collapsed every local into one rebinding name, so the
    adjacency map, result, and edge tuples it referenced were undefined (and
    the `for nodea, nodea, cost` target clobbered itself).
    """
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node_u, node_v, cost in edges:
        # Undirected graph: record both directions.
        adjacency[node_u].append([node_v, cost])
        adjacency[node_v].append([node_u, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 41
|
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
# Module logger.
snake_case_ = logging.get_logger(__name__)
# NOTE(review): this rebinding clobbers the logger above -- both globals share
# one degenerate name; the map below was presumably a separate
# "pretrained config archive" constant. Confirm before relying on either.
snake_case_ = {
    '''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''',
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class SCREAMING_SNAKE_CASE__(__snake_case):
    """Configuration for UMT5 models (T5-style encoder-decoder hyper-parameters).

    The previous version declared twenty constructor parameters with one
    duplicated name (a SyntaxError), bound both class attributes to one name,
    and gave all three properties the same name; real names are restored from
    the attribute assignments and super().__init__ keywords in the body.
    """

    # NOTE(review): presumably PretrainedConfig's `model_type` and
    # `keys_to_ignore_at_inference` -- confirm against the base class.
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=25_0112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\''
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'

    @property
    def hidden_size(self):
        # BERT-style alias for code written against encoder configs.
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class SCREAMING_SNAKE_CASE__(__snake_case):
    """ONNX export configuration for UMT5 (seq2seq with optional past KV).

    The previous version gave all three properties one shared name (so two
    were clobbered) and dropped its dict mutations into throwaway locals;
    the in-file "Copied from ... T5OnnxConfig.inputs / .default_onnx_opset"
    comments ground the restored property names.
    """

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self):
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self):
        return 13

    @property
    def atol_for_validation(self):
        # NOTE(review): restored name assumed from the OnnxConfig contract for
        # a 5e-4 tolerance property -- confirm against the base class.
        return 5e-4
| 164
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
    """Builds a tiny ConvNeXt-V2 config plus dummy inputs for the tests below.

    NOTE(review): identifiers are machine-mangled. ``__init__`` repeats the
    parameter name ``lowercase`` (a SyntaxError as written) and binds each
    value to a throwaway local ``lowerCAmelCase`` instead of the
    ``self.<attr>`` slots that the other methods read (``self.batch_size``,
    ``self.num_channels``, ...); several methods likewise bind locals to
    ``lowerCAmelCase`` but read them back under their original names.
    Restore from the upstream ConvNextV2ModelTester before running.
    """

    def __init__( self , lowercase , lowercase=13 , lowercase=32 , lowercase=3 , lowercase=4 , lowercase=[10, 20, 30, 40] , lowercase=[2, 2, 3, 2] , lowercase=True , lowercase=True , lowercase=37 , lowercase="gelu" , lowercase=10 , lowercase=0.02 , lowercase=["stage2", "stage3", "stage4"] , lowercase=[2, 3, 4] , lowercase=None , ) -> Optional[int]:
        # Presumably `self.parent = parent`, `self.batch_size = batch_size`, etc.
        lowerCAmelCase = parent
        lowerCAmelCase = batch_size
        lowerCAmelCase = image_size
        lowerCAmelCase = num_channels
        lowerCAmelCase = num_stages
        lowerCAmelCase = hidden_sizes
        lowerCAmelCase = depths
        lowerCAmelCase = is_training
        lowerCAmelCase = use_labels
        lowerCAmelCase = intermediate_size
        lowerCAmelCase = hidden_act
        lowerCAmelCase = num_labels
        lowerCAmelCase = initializer_range
        lowerCAmelCase = out_features
        lowerCAmelCase = out_indices
        lowerCAmelCase = scope

    def _snake_case ( self ) -> Any:
        # Random pixel tensor (+ labels when use_labels is set) and a config.
        lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        lowerCAmelCase = None
        if self.use_labels:
            lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )

        lowerCAmelCase = self.get_config()

        return config, pixel_values, labels

    def _snake_case ( self ) -> str:
        # Tiny ConvNextVaConfig derived from the tester's attributes.
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowercase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )

    def _snake_case ( self , lowercase , lowercase , lowercase ) -> Tuple:
        # Forward pass through the bare model; checks the last-hidden-state shape.
        lowerCAmelCase = ConvNextVaModel(config=lowercase )
        model.to(lowercase )
        model.eval()
        lowerCAmelCase = model(lowercase )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def _snake_case ( self , lowercase , lowercase , lowercase ) -> Tuple:
        # Classification head: logits must be (batch_size, num_labels).
        lowerCAmelCase = ConvNextVaForImageClassification(lowercase )
        model.to(lowercase )
        model.eval()
        lowerCAmelCase = model(lowercase , labels=lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _snake_case ( self , lowercase , lowercase , lowercase ) -> Optional[int]:
        # Backbone checks, with explicit out_features and with out_features=None.
        lowerCAmelCase = ConvNextVaBackbone(config=lowercase )
        model.to(lowercase )
        model.eval()
        lowerCAmelCase = model(lowercase )

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )

        # verify backbone works with out_features=None
        lowerCAmelCase = None
        lowerCAmelCase = ConvNextVaBackbone(config=lowercase )
        model.to(lowercase )
        model.eval()
        lowerCAmelCase = model(lowercase )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def _snake_case ( self ) -> Optional[int]:
        # Common (unlabeled) inputs: {"pixel_values": ...}.
        lowerCAmelCase = self.prepare_config_and_inputs()
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
        lowerCAmelCase = {"""pixel_values""": pixel_values}
        return config, inputs_dict

    def _snake_case ( self ) -> Optional[int]:
        # Labeled inputs: {"pixel_values": ..., "labels": ...}.
        lowerCAmelCase = self.prepare_config_and_inputs()
        lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = config_and_inputs
        lowerCAmelCase = {"""pixel_values""": pixel_values, """labels""": labels}
        return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
    """Common model tests for ConvNeXt-V2 (Model / ForImageClassification / Backbone).

    NOTE(review): identifiers are machine-mangled. The two mixin bases are
    both written ``_UpperCAmelCase`` (presumably ModelTesterMixin and
    PipelineTesterMixin, imported above — confirm); all seven class
    attributes share the name ``_SCREAMING_SNAKE_CASE`` (only the last
    ``False`` actually binds), every test method is named ``_snake_case``
    (later defs shadow earlier ones), and locals bound to ``lowerCAmelCase``
    are read back under their original names.
    """

    # presumably all_model_classes
    _SCREAMING_SNAKE_CASE = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    # presumably pipeline_model_mapping
    _SCREAMING_SNAKE_CASE = (
        {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    _SCREAMING_SNAKE_CASE = False
    _SCREAMING_SNAKE_CASE = False
    _SCREAMING_SNAKE_CASE = False
    _SCREAMING_SNAKE_CASE = False
    _SCREAMING_SNAKE_CASE = False

    def _snake_case ( self ) -> str:
        # setUp: build the model tester and a ConfigTester.
        lowerCAmelCase = ConvNextVaModelTester(self )
        lowerCAmelCase = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )

    def _snake_case ( self ) -> Union[str, Any]:
        # Runs the standard ConfigTester battery.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _snake_case ( self ) -> Optional[Any]:
        return

    @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
    def _snake_case ( self ) -> Optional[int]:
        pass

    @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
    def _snake_case ( self ) -> Dict:
        pass

    @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
    def _snake_case ( self ) -> Dict:
        pass

    def _snake_case ( self ) -> List[Any]:
        # Training smoke test: one forward + backward per model class.
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels()
            lowerCAmelCase = True

            if model_class.__name__ in [
                *get_values(lowercase ),
                *get_values(lowercase ),
            ]:
                continue

            lowerCAmelCase = model_class(lowercase )
            model.to(lowercase )
            model.train()
            lowerCAmelCase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
            lowerCAmelCase = model(**lowercase ).loss
            loss.backward()

    def _snake_case ( self ) -> Tuple:
        # Same training smoke test, with gradient checkpointing enabled.
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_with_labels()
            lowerCAmelCase = False
            lowerCAmelCase = True

            if (
                model_class.__name__
                in [*get_values(lowercase ), *get_values(lowercase )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            lowerCAmelCase = model_class(lowercase )
            model.to(lowercase )
            model.gradient_checkpointing_enable()
            model.train()
            lowerCAmelCase = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
            lowerCAmelCase = model(**lowercase ).loss
            loss.backward()

    def _snake_case ( self ) -> str:
        # Every model's forward signature should start with `pixel_values`.
        lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCAmelCase = model_class(lowercase )
            lowerCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase = [*signature.parameters.keys()]

            lowerCAmelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowercase )

    def _snake_case ( self ) -> Optional[Any]:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase )

    def _snake_case ( self ) -> str:
        # Hidden-states count/shape check, driven by argument and by config.
        def check_hidden_states_output(lowercase , lowercase , lowercase ):
            lowerCAmelCase = model_class(lowercase )
            model.to(lowercase )
            model.eval()

            with torch.no_grad():
                lowerCAmelCase = model(**self._prepare_for_class(lowercase , lowercase ) )

            lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            lowerCAmelCase = self.model_tester.num_stages
            self.assertEqual(len(lowercase ) , expected_num_stages + 1 )

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            lowerCAmelCase = True
            check_hidden_states_output(lowercase , lowercase , lowercase )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase = True

            check_hidden_states_output(lowercase , lowercase , lowercase )

    def _snake_case ( self ) -> List[str]:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase )

    @slow
    def _snake_case ( self ) -> Union[str, Any]:
        # Checkpoint-loading smoke test for the first pretrained archive entry.
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase = ConvNextVaModel.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )
def UpperCAmelCase__ ( ):
    """Load the standard COCO cats fixture image used by the integration test.

    Returns:
        A PIL ``Image`` opened from the tests fixtures directory.
    """
    # BUG FIX: the original bound the opened image to a throwaway local and
    # then returned the undefined name `image`, raising NameError.
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
    """Slow integration test: real ConvNeXt-V2 checkpoint, real image, pinned logits.

    NOTE(review): identifiers are machine-mangled — locals bound to
    ``lowerCAmelCase`` are read back under their original names
    (``model``, ``preprocessor``, ``inputs``, ``outputs``, ...); restore
    from the upstream test before running.
    """

    @cached_property
    def _snake_case ( self ) -> Optional[int]:
        # Image processor for the tiny-1k-224 checkpoint (None without vision deps).
        return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None

    @slow
    def _snake_case ( self ) -> int:
        lowerCAmelCase = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(lowercase )

        lowerCAmelCase = self.default_image_processor
        lowerCAmelCase = prepare_img()
        lowerCAmelCase = preprocessor(images=lowercase , return_tensors="""pt""" ).to(lowercase )

        # forward pass
        with torch.no_grad():
            lowerCAmelCase = model(**lowercase )

        # verify the logits
        lowerCAmelCase = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , lowercase )

        lowerCAmelCase = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(lowercase )

        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4 ) )
| 393
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class lowercase :
    """A minimal dense matrix: +, unary -, -, scalar/matrix *, transpose and
    the Sherman–Morrison rank-one inverse update.

    BUG FIX (reconstruction): the mangled original bound ``self.row``,
    ``self.column`` and ``self.array`` to throwaway locals, repeated
    parameter names (a SyntaxError), and named methods ``_snake_case``
    even though call sites use ``validate_indicies``/``transpose``/
    ``sherman_morrison``. Targets and names are restored from those reads.
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a `row` x `column` matrix filled with `default_value`."""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f'Matrix consist of {self.row} rows and {self.column} columns\n'

        # Width of the widest element, so columns line up.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f'%{max_element_length}s'

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc) -> bool:
        """True when `loc` is a valid (row, column) pair for this matrix.

        (sic: the spelling "indicies" matches the existing call sites.)
        """
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, lowercase)
        assert self.row == another.row and self.column == another.column

        # Add
        result = lowercase(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = lowercase(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = lowercase(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, lowercase):  # Matrix multiplication
            assert self.column == another.row
            result = lowercase(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'Unsupported type given for another ({type(another)})'
            raise TypeError(msg)

    def transpose(self):
        """Return a new matrix that is the transpose of this one."""
        result = lowercase(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Return (A + u v^T)^(-1) given that `self` is A^(-1).

        `u` and `v` must be column vectors with the same height as A.
        Returns None when A + u v^T is not invertible (denominator zero).
        """
        assert isinstance(u, lowercase) and isinstance(v, lowercase)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":

    def test_sherman_morrison() -> None:
        """Exercise the matrix class and the Sherman–Morrison update, printing results.

        BUG FIX (reconstruction): the mangled original assigned every matrix
        element to a throwaway local, printed undefined names (`ainv`, `u`,
        `v`), called the class by the lost name `Matrix`, and the trailing
        call referenced an undefined `testa`.
        """
        # a^(-1) = identity, built element-wise.
        ainv = lowercase(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = lowercase(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = lowercase(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test_doctests() -> None:
        import doctest

        doctest.testmod()

    test_sherman_morrison()
    test_doctests()
| 393
| 1
|
def a_(graph: dict) -> set:
    """Approximate a minimum vertex cover via maximal matching (2-approximation).

    Args:
        graph: adjacency mapping, node -> iterable of neighbour nodes.

    Returns:
        A set of vertices covering every edge of the graph.

    BUG FIX (reconstruction): the mangled original read undefined locals,
    added the whole graph dict (unhashable) to the cover instead of the edge
    endpoints, and called a module-level ``get_edges`` whose name was
    clobbered by a rename clash — hence the nested helper here.
    """

    def get_edges(g: dict) -> set:
        # Directed edge set of the adjacency mapping.
        return {(from_node, to_node) for from_node, to_nodes in g.items() for to_node in to_nodes}

    chosen_vertices = set()
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices
def a_(graph: dict) -> set:
    """Return the set of directed edges (from_node, to_node) of `graph`.

    Args:
        graph: adjacency mapping, node -> iterable of neighbour nodes.

    BUG FIX: the mangled original named the parameter ``__lowercase`` while
    the body iterated an undefined ``graph`` — the parameter name is
    restored from that read.
    """
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
    # Run any doctests defined in this module.
    import doctest

    doctest.testmod()

    # NOTE(review): the commented example below calls
    # `matching_min_vertex_cover`, a name that does not exist here — both
    # functions above were mangled to `a_`, so the second shadows the first.
    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 686
|
import random
from .binary_exp_mod import bin_exp_mod
def a_(n: int, prec: int = 1_000) -> bool:
    """Miller–Rabin probabilistic primality test.

    Args:
        n: the number to test.
        prec: number of independent random witness rounds; the probability of
            declaring a composite prime is at most 4**-prec.

    Returns:
        True when `n` is (very probably) prime, False when certainly composite.

    BUG FIXES: the mangled original declared both parameters as
    ``__lowercase`` (a SyntaxError) while the body read ``n``/``prec``;
    ``d /= 2`` produced a float exponent; and the modular exponentiation
    call passed the same argument three times. The builtin three-argument
    ``pow(a, d, n)`` replaces the helper with identical semantics.
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd: write n - 1 = d * (2**exp) with d odd.
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int exponent
        exp += 1
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = pow(a, d, n)  # a**d mod n
        if b != 1:
            # `flag` stays True when `a` witnesses that n is composite.
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    # BUG FIX: the bound was assigned to a throwaway name while the range
    # read `n`, and the filter called the lost name `is_prime_big` — the
    # primality test above is `a_`.
    n = abs(int(input('''Enter bound : ''').strip()))
    print('''Here\'s the list of primes:''')
    print(''', '''.join(str(i) for i in range(n + 1) if a_(i)))
| 686
| 1
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class __a :
    """Builds tiny GPTNeoX configs/inputs and per-head check helpers for the tests below.

    NOTE(review): identifiers are machine-mangled. ``__init__`` repeats the
    parameter name ``lowercase__`` (a SyntaxError as written) and binds each
    value to a local ``_lowercase`` instead of the ``self.<attr>`` slots the
    other methods read; every method is named ``_UpperCAmelCase`` (later defs
    of the same arity shadow earlier ones); locals bound to ``_lowercase``
    are read back under their original names (``config``, ``result``, ...).
    Restore from the upstream GPTNeoXModelTester before running.
    """

    def __init__( self : str , lowercase__ : Optional[int] , lowercase__ : Any=13 , lowercase__ : Any=7 , lowercase__ : Union[str, Any]=True , lowercase__ : List[Any]=True , lowercase__ : Any=True , lowercase__ : int=True , lowercase__ : Optional[Any]=99 , lowercase__ : int=64 , lowercase__ : str=5 , lowercase__ : Union[str, Any]=4 , lowercase__ : Optional[int]=37 , lowercase__ : Optional[int]="gelu" , lowercase__ : List[str]=0.1 , lowercase__ : str=0.1 , lowercase__ : Dict=5_12 , lowercase__ : List[str]=16 , lowercase__ : List[str]=2 , lowercase__ : int=0.02 , lowercase__ : int=3 , lowercase__ : Tuple=4 , lowercase__ : Dict=None , ) ->str:
        """simple docstring"""
        # Presumably `self.parent = parent`, `self.batch_size = batch_size`, etc.
        _lowercase = parent
        _lowercase = batch_size
        _lowercase = seq_length
        _lowercase = is_training
        _lowercase = use_input_mask
        _lowercase = use_token_type_ids
        _lowercase = use_labels
        _lowercase = vocab_size
        _lowercase = hidden_size
        _lowercase = num_hidden_layers
        _lowercase = num_attention_heads
        _lowercase = intermediate_size
        _lowercase = hidden_act
        _lowercase = hidden_dropout_prob
        _lowercase = attention_probs_dropout_prob
        _lowercase = max_position_embeddings
        _lowercase = type_vocab_size
        _lowercase = type_sequence_label_size
        _lowercase = initializer_range
        _lowercase = num_labels
        _lowercase = num_choices
        _lowercase = scope
        _lowercase = vocab_size - 1

    def _UpperCAmelCase ( self : int) ->int:
        """simple docstring"""
        # Random token ids, optional attention mask/labels, and a config.
        _lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)

        _lowercase = None
        if self.use_input_mask:
            _lowercase = random_attention_mask([self.batch_size, self.seq_length])

        _lowercase = None
        if self.use_labels:
            _lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)

        _lowercase = self.get_config()

        return config, input_ids, input_mask, token_labels

    def _UpperCAmelCase ( self : str) ->Dict:
        """simple docstring"""
        # Tiny GPTNeoXConfig derived from the tester's attributes.
        return GPTNeoXConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )

    def _UpperCAmelCase ( self : List[Any]) ->int:
        """simple docstring"""
        # Decoder variant of prepare_config_and_inputs.
        _lowercase , _lowercase , _lowercase , _lowercase = self.prepare_config_and_inputs()

        _lowercase = True

        return config, input_ids, input_mask, token_labels

    def _UpperCAmelCase ( self : str , lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : List[Any]) ->int:
        """simple docstring"""
        # Bare model forward; checks the last-hidden-state shape.
        _lowercase = GPTNeoXModel(config=lowercase__)
        model.to(lowercase__)
        model.eval()
        _lowercase = model(lowercase__ , attention_mask=lowercase__)
        _lowercase = model(lowercase__)

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def _UpperCAmelCase ( self : Dict , lowercase__ : List[Any] , lowercase__ : int , lowercase__ : Optional[Any]) ->Dict:
        """simple docstring"""
        # Model as decoder (is_decoder=True).
        _lowercase = True
        _lowercase = GPTNeoXModel(lowercase__)
        model.to(lowercase__)
        model.eval()
        _lowercase = model(lowercase__ , attention_mask=lowercase__)

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def _UpperCAmelCase ( self : Optional[int] , lowercase__ : str , lowercase__ : Tuple , lowercase__ : List[Any] , lowercase__ : int) ->List[Any]:
        """simple docstring"""
        # Causal-LM head: logits must be (batch, seq, vocab).
        _lowercase = GPTNeoXForCausalLM(config=lowercase__)
        model.to(lowercase__)
        model.eval()

        _lowercase = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def _UpperCAmelCase ( self : List[Any] , lowercase__ : List[Any] , lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : List[str]) ->int:
        """simple docstring"""
        # QA head: start/end logits are (batch, seq).
        _lowercase = self.num_labels
        _lowercase = GPTNeoXForQuestionAnswering(lowercase__)
        model.to(lowercase__)
        model.eval()
        _lowercase = model(lowercase__ , attention_mask=lowercase__)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))

    def _UpperCAmelCase ( self : List[str] , lowercase__ : Any , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] , lowercase__ : Union[str, Any]) ->Optional[int]:
        """simple docstring"""
        # Sequence-classification head: logits are (batch, num_labels).
        _lowercase = self.num_labels
        _lowercase = GPTNeoXForSequenceClassification(lowercase__)
        model.to(lowercase__)
        model.eval()
        _lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        _lowercase = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def _UpperCAmelCase ( self : Optional[int] , lowercase__ : Any , lowercase__ : Optional[int] , lowercase__ : int , lowercase__ : Optional[int]) ->Dict:
        """simple docstring"""
        # Token-classification head: logits are (batch, seq, num_labels).
        _lowercase = self.num_labels
        _lowercase = GPTNeoXForTokenClassification(lowercase__)
        model.to(lowercase__)
        model.eval()

        _lowercase = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))

    def _UpperCAmelCase ( self : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : int , lowercase__ : Union[str, Any]) ->int:
        """simple docstring"""
        # KV-cache consistency: with/without past must produce matching slices.
        _lowercase = True
        _lowercase = GPTNeoXForCausalLM(config=lowercase__)
        model.to(lowercase__)
        model.eval()

        # first forward pass
        _lowercase = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
        _lowercase = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        _lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size)
        _lowercase = ids_tensor((self.batch_size, 3) , vocab_size=2)

        # append to next input_ids and
        _lowercase = torch.cat([input_ids, next_tokens] , dim=-1)
        _lowercase = torch.cat([input_mask, next_mask] , dim=-1)

        _lowercase = model(lowercase__ , attention_mask=lowercase__ , output_hidden_states=lowercase__)
        _lowercase = output_from_no_past["""hidden_states"""][0]
        _lowercase = model(
            lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ , output_hidden_states=lowercase__ , )["""hidden_states"""][0]

        # select random slice
        _lowercase = ids_tensor((1,) , output_from_past.shape[-1]).item()
        _lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
        _lowercase = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))

    def _UpperCAmelCase ( self : Tuple) ->Union[str, Any]:
        """simple docstring"""
        # Common inputs dict: {"input_ids": ..., "attention_mask": ...}.
        _lowercase = self.prepare_config_and_inputs()
        _lowercase , _lowercase , _lowercase , _lowercase = config_and_inputs
        _lowercase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __a ( _snake_case ,_snake_case ,_snake_case ,unittest.TestCase ):
    """Common model/pipeline/generation tests for GPTNeoX.

    NOTE(review): identifiers are machine-mangled. The three mixin bases are
    all written ``_snake_case`` (presumably ModelTesterMixin,
    GenerationTesterMixin and PipelineTesterMixin, imported above — confirm);
    the class attributes all share the name ``__SCREAMING_SNAKE_CASE`` (only
    the last actually binds) and the methods the name ``_UpperCAmelCase``.
    """

    # presumably all_model_classes
    __SCREAMING_SNAKE_CASE : Optional[Any] = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # presumably all_generative_model_classes
    __SCREAMING_SNAKE_CASE : int = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    # presumably pipeline_model_mapping
    __SCREAMING_SNAKE_CASE : List[str] = (
        {
            'feature-extraction': GPTNeoXModel,
            'question-answering': GPTNeoXForQuestionAnswering,
            'text-classification': GPTNeoXForSequenceClassification,
            'text-generation': GPTNeoXForCausalLM,
            'token-classification': GPTNeoXForTokenClassification,
            'zero-shot': GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __SCREAMING_SNAKE_CASE : Optional[int] = False
    __SCREAMING_SNAKE_CASE : Dict = False
    __SCREAMING_SNAKE_CASE : Optional[Any] = False
    __SCREAMING_SNAKE_CASE : Any = False

    def _UpperCAmelCase ( self : str) ->str:
        """simple docstring"""
        # setUp: build the model tester and a ConfigTester.
        _lowercase = GPTNeoXModelTester(self)
        _lowercase = ConfigTester(self , config_class=lowercase__ , hidden_size=64 , num_attention_heads=8)

    def _UpperCAmelCase ( self : Tuple) ->Optional[Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def _UpperCAmelCase ( self : Union[str, Any]) ->Optional[int]:
        """simple docstring"""
        _lowercase , _lowercase , _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(lowercase__ , lowercase__ , lowercase__)

    def _UpperCAmelCase ( self : Any) ->str:
        """simple docstring"""
        _lowercase , _lowercase , _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(lowercase__ , lowercase__ , lowercase__)

    def _UpperCAmelCase ( self : Optional[Any]) ->Union[str, Any]:
        """simple docstring"""
        # Decoder check with the attention mask dropped.
        _lowercase , _lowercase , _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
        _lowercase = None
        self.model_tester.create_and_check_model_as_decoder(lowercase__ , lowercase__ , lowercase__)

    def _UpperCAmelCase ( self : Dict) ->str:
        """simple docstring"""
        _lowercase , _lowercase , _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase__ , lowercase__ , lowercase__)

    def _UpperCAmelCase ( self : Union[str, Any]) ->Union[str, Any]:
        """simple docstring"""
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*lowercase__)

    def _UpperCAmelCase ( self : Optional[Any]) ->List[Any]:
        """simple docstring"""
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase__)

    def _UpperCAmelCase ( self : Dict) ->Any:
        """simple docstring"""
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowercase__)

    def _UpperCAmelCase ( self : Dict) ->str:
        """simple docstring"""
        _lowercase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase__)

    @unittest.skip(reason="""Feed forward chunking is not implemented""")
    def _UpperCAmelCase ( self : Union[str, Any]) ->Optional[int]:
        """simple docstring"""
        pass

    @parameterized.expand([("""linear""",), ("""dynamic""",)])
    def _UpperCAmelCase ( self : Optional[Any] , lowercase__ : Optional[Any]) ->Tuple:
        """simple docstring"""
        # RoPE scaling: a scaled model must diverge from the original except for
        # short inputs under dynamic scaling.
        _lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
        _lowercase = ids_tensor([1, 10] , config.vocab_size)
        _lowercase = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        _lowercase = GPTNeoXModel(lowercase__)
        original_model.to(lowercase__)
        original_model.eval()
        _lowercase = original_model(lowercase__).last_hidden_state
        _lowercase = original_model(lowercase__).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        _lowercase = {"""type""": scaling_type, """factor""": 10.0}
        _lowercase = GPTNeoXModel(lowercase__)
        scaled_model.to(lowercase__)
        scaled_model.eval()
        _lowercase = scaled_model(lowercase__).last_hidden_state
        _lowercase = scaled_model(lowercase__).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-5))
        else:
            self.assertFalse(torch.allclose(lowercase__ , lowercase__ , atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(lowercase__ , lowercase__ , atol=1e-5))
@require_torch
class __a ( unittest.TestCase ):
    """Slow generation integration test against EleutherAI/pythia-410m-deduped.

    NOTE(review): identifiers are machine-mangled — locals bound to
    ``_lowercase`` are read back under their original names (``tokenizer``,
    ``model``, ...); restore from the upstream test before running.
    """

    @slow
    def _UpperCAmelCase ( self : int) ->int:
        """simple docstring"""
        # Greedy generation must be identical with and without gradient checkpointing.
        _lowercase = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""")
        for checkpointing in [True, False]:
            _lowercase = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(lowercase__)

            _lowercase = tokenizer("""My favorite food is""" , return_tensors="""pt""").to(lowercase__)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            _lowercase = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""

            _lowercase = model.generate(**lowercase__ , do_sample=lowercase__ , max_new_tokens=20)
            _lowercase = tokenizer.batch_decode(lowercase__)[0]

            self.assertEqual(lowercase__ , lowercase__)
| 572
|
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( snake_case_ ):
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
_lowercase = 1
_lowercase = 1
while repunit:
_lowercase = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def _SCREAMING_SNAKE_CASE ( snake_case_ = 1000000 ):
_lowercase = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(snake_case_ ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
    # BUG FIX: `solution` was lost in a rename (the function above is
    # `_SCREAMING_SNAKE_CASE`); alias it so the self-documenting f-string
    # keeps printing the original "solution() = ..." text.
    solution = _SCREAMING_SNAKE_CASE
    print(F"""{solution() = }""")
| 572
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class a_ ( PipelineTool ):
    """Agent tool: classify an English text against caller-provided labels by
    running each (text, "This example is {label}") pair through an NLI model
    and picking the label with the highest entailment logit.

    NOTE(review): the mangled source bound every class attribute to ``__a``
    and every method to ``_lowercase``, and inherited from the then-undefined
    name ``a_``. The attribute/method names below are restored from the
    ``PipelineTool`` contract (the file imports ``PipelineTool``,
    ``AutoTokenizer`` and ``AutoModelForSequenceClassification`` for exactly
    these slots) — confirm against the upstream TextClassificationTool.
    """

    default_checkpoint = 'facebook/bart-large-mnli'
    description = (
        'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
        'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
        'It returns the most likely label in the list of provided `labels` for the input text.'
    )
    name = 'text_classifier'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ['text', ['text']]
    outputs = ['text']

    def setup(self):
        """Resolve which logit column means 'entailment' for this checkpoint."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        # BUG FIX: `config.idalabel` was a mangled `config.id2label`, and the
        # found index was assigned to a dead local instead of the attribute.
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')

    def encode(self, text, labels):
        """Tokenize `text` paired with one NLI hypothesis per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f'This example is {label}' for label in labels], return_tensors='pt', padding='max_length', )

    def decode(self, outputs):
        """Return the label whose hypothesis received the highest entailment logit."""
        logits = outputs.logits
        # BUG FIX: use the entailment column resolved in setup() rather than a
        # hard-coded index 2 (only valid for MNLI-ordered label maps).
        label_id = torch.argmax(logits[:, self.entailment_id]).item()
        return self._labels[label_id]
| 318
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module logger and the pretrained-config archive map. The original bound both
# to the same name (`lowerCamelCase_`), so the logger was immediately shadowed
# by the dict.
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class a_ ( BackboneConfigMixin , PretrainedConfig ):
    """ResNet model configuration (ResNet-50-style defaults).

    Bases restored to the imported `BackboneConfigMixin` / `PretrainedConfig`
    (the original inherited from the unrelated tool class `a_` twice).
    """

    # Read by the config machinery and by `__init__` below; the original
    # collapsed both onto one name (`__a`), leaving `self.layer_types` undefined.
    model_type = 'resnet'
    layer_types = ['basic', 'bottleneck']

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=None,
        depths=None,
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """Build the config; `None` sentinels avoid shared mutable list defaults."""
        super().__init__(**kwargs)
        # Default stage widths/depths for a ResNet-50-style backbone.
        if hidden_sizes is None:
            hidden_sizes = [256, 512, 1024, 2048]
        if depths is None:
            depths = [3, 4, 6, 3]
        if layer_type not in self.layer_types:
            raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        # One named stage per depth entry, plus the stem.
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self.out_features, self.out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class a_ ( OnnxConfig ):
    """ONNX export configuration for ResNet.

    Base restored to the imported `OnnxConfig`; the two properties were both
    named `_lowercase` in the original, so the axis mapping was shadowed and
    unreachable. Names follow the `OnnxConfig` contract.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Axis names for the exported graph's `pixel_values` input."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the ONNX export."""
        return 1e-3
| 318
| 1
|
def a_ ( lowerCamelCase : int = 1000 ):
    """Return sum(2*a*((a-1)//2)) for a in [3, lowerCamelCase].

    The original body read an undefined name `n` instead of its own parameter,
    and the main guard called an undefined `solution`.
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, lowerCamelCase + 1))


if __name__ == "__main__":
    print(a_())
| 705
|
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    """Recursively insertion-sort the first `n` elements of `collection` in place.

    Both functions here were defined under one name (`a_`) while their bodies
    call `insert_next` / `rec_insertion_sort`, so the intended names are
    restored. They are mutually dependent and kept together.
    """
    # Base case: empty/singleton collection, or a single element left to place.
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    """Bubble the element at `index - 1` rightward until its neighbours are ordered."""
    # Stop at the end of the list, or once the adjacent pair is already in order.
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swap the out-of-order neighbours (the original assigned the swap to a
    # throwaway name instead of back into the list), then continue rightward.
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
if __name__ == "__main__":
    # Read whitespace-separated integers, sort them in place, print the result.
    # The original bound both values to `__snake_case`, leaving `numbers` and
    # `number_list` undefined at their use sites below.
    numbers = input("""Enter integers separated by spaces: """)
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
| 513
| 0
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
# Import-time guard: the CLI training command needs at least one DL backend.
if not is_tf_available() and not is_torch_available():
    raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')

# TF training parameters
# NOTE(review): both flags bind the same name `UpperCAmelCase_` (presumably
# USE_XLA and USE_AMP upstream), so the first assignment is shadowed — confirm
# against the original `transformers/commands/train.py` before relying on them.
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Dict = False
def A_ ( _lowerCAmelCase : Namespace ):
    """Factory wired into argparse `set_defaults(func=...)`: build the train command.

    The command class in this module is named `UpperCAmelCase__`; the original
    returned the undefined symbol `TrainCommand`.
    """
    return UpperCAmelCase__(_lowerCAmelCase)
class UpperCAmelCase__ ( BaseTransformersCLICommand ):
    """`transformers-cli train`: fine-tune a pipeline on a tab-separated CSV dataset.

    Base restored to the imported `BaseTransformersCLICommand` (the original
    inherited from an undefined `A`); method names (`register_subcommand`,
    `run`, `run_tf`, `run_torch`) and the `self.` attribute assignments follow
    the upstream command contract — the obfuscated version dropped all of them.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `train` sub-command and its arguments on `parser`."""
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=3_2, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=6_4, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        # `A_` is this module's train-command factory (obfuscated name).
        train_parser.set_defaults(func=A_)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        # Prefer TF when available, else PyTorch.
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f'Loading {args.task} pipeline for {args.model}')
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f'Loading dataset from {args.train_data}')
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f'Loading validation dataset from {args.validation_data}')
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        """Dispatch to the framework-specific training entry point."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_tf(self):
        # TF training is not implemented in this snippet.
        raise NotImplementedError

    def run_torch(self):
        """Fit the pipeline on the loaded datasets and save it to `self.output`."""
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
| 44
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the pretrained-config archive map. The original bound both
# to the same name (`A`), so the logger was immediately shadowed by the dict.
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """Configuration for NLLB-MoE (Mixture-of-Experts translation model).

    Base restored to the imported `PretrainedConfig`. The original declared all
    38 `__init__` parameters under one name (`_SCREAMING_SNAKE_CASE`) — a
    SyntaxError — and dropped every `self.` target; parameter names/defaults
    follow the upstream `NllbMoeConfig` signature in positional-default order.
    """

    model_type = 'nllb-moe'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # The second `encoder_layers` assignment in the original corresponds to
        # `num_hidden_layers` upstream.
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 160
| 0
|
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
# Module logger and the pipeline's output type aliases. The original bound all
# three to `_A`, so `List[Prediction]` referenced a name that was never defined
# (NameError at import) and the logger was shadowed.
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class _lowercase ( Pipeline ):
    """Object-detection pipeline: image in, list of {score, label, box} dicts out.

    Handles both plain object-detection models and LayoutLM-style token
    classifiers (when `self.tokenizer` is set). The decorator/base and the
    method names (`_sanitize_parameters`, `preprocess`, `_forward`,
    `postprocess`, `_get_bounding_box`) restore the upstream contract — the
    obfuscated version bound every method and local to one name, making the
    pipeline unrunnable.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.')
        requires_backends(self, "vision")
        # NOTE(review): `dict_items + dict_items` relies on the lazy mapping
        # types used by transformers; plain dicts would raise TypeError here.
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        """Route the optional `threshold` kwarg to postprocess()."""
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        """Load the image and build model inputs, carrying the original size along."""
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            # LayoutLM path: the image processor OCR'd words and boxes.
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        """Convert model outputs to [{"score", "label", "box"}, ...], filtered by threshold."""
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                # Boxes come normalized to a 1000x1000 grid.
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1_000),
                            (height * bbox[1] / 1_000),
                            (width * bbox[2] / 1_000),
                            (height * bbox[3] / 1_000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold
            ]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation

    def _get_bounding_box(self, box: "torch.Tensor"):
        """Turn a 4-element box tensor into an int {xmin, ymin, xmax, ymax} dict."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
| 631
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
# Release-tooling constants. The original bound every one of them to `_A`
# (each assignment shadowing the last) while the functions below reference
# them by their real names, so those names are restored here.
PATH_TO_EXAMPLES = "examples/"
# pattern name -> (compiled regex to find, replacement template; "VERSION" is
# substituted with the target version string at use time).
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
# pattern name -> file that pattern applies to.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname: str, version: str, pattern: str) -> None:
    """Rewrite the version in `fname` using the regex registered under `pattern`.

    The original declared all three parameters under one name (a SyntaxError)
    and lost every local binding; the name is restored because sibling
    functions call `update_version_in_file`.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version: str) -> None:
    """Apply the "examples" version pattern to every .py file under examples/.

    The original walked its own `version` argument instead of the examples
    path; the literal is inlined here because the module constant was lost.
    """
    for folder, directories, fnames in os.walk("examples/"):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version: str, patch: bool = False) -> None:
    """Update the version everywhere it is pinned; skip examples for patch releases."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list() -> None:
    """Drop `/main` from model-doc links in the README's architecture list.

    The original opened an undefined name and lost all locals; the README path
    is inlined because the module constant was lost.
    """
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open("README.md", "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open("README.md", "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current package version out of the main __init__ and parse it."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    # Group 0 of the "init" pattern captures the quoted version string.
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False) -> None:
    """Interactive pre-release step: pick the release version and pin it everywhere."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
    else:
        default_version = f'{default_version.major}.{default_version.minor + 1}.0'
    # Now let's ask nicely if that's the right one.
    version = input(f'Which version are you releasing? [{default_version}]')
    if len(version) == 0:
        version = default_version
    print(f'Updating version to {version}.')
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work() -> None:
    """Interactive post-release step: bump everything to the next .dev0 version."""
    current_version = get_version()
    dev_version = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f'Which version are we developing now? [{dev_version}]')
    if len(version) == 0:
        version = dev_version
    print(f'Updating version to {version}.')
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # The original bound both the parser and the parsed args to `_A`, leaving
    # the later `parser` / `args` references undefined.
    parser = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print('Nothing to do after a patch :-)')
    else:
        post_release_work()
| 631
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.