from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type

from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm

NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet; snippets that are too short are skipped."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert a key into the LSH index and attach it to an existing duplicate cluster if one is close enough."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dicts
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
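
# Usage sketch (added for illustration, not part of the original script): driving
# `deduplicate_dataset` end to end, assuming a `datasets.Dataset` with "content",
# "repo_name" and "path" columns; the dataset name below is a placeholder.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("codeparrot/codeparrot-clean", split="train")
    ds_filter, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
    print(f"Kept {len(ds_filter)} files across {len(duplicate_clusters)} duplicate clusters")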
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ :
def __init__(self , _lowercase , _lowercase=None , _lowercase=None ):
'''simple docstring'''
__a : Union[str, Any] = data
__a : str = previous
__a : Optional[int] = next_node
def __str__(self ):
'''simple docstring'''
return F'''{self.data}'''
def lowerCAmelCase__(self ):
'''simple docstring'''
return self.data
def lowerCAmelCase__(self ):
'''simple docstring'''
return self.next
def lowerCAmelCase__(self ):
'''simple docstring'''
return self.previous
class SCREAMING_SNAKE_CASE__ :
def __init__(self , _lowercase ):
'''simple docstring'''
__a : str = head
def __iter__(self ):
'''simple docstring'''
return self
def lowerCAmelCase__(self ):
'''simple docstring'''
if not self.current:
raise StopIteration
else:
__a : Dict = self.current.get_data()
__a : Union[str, Any] = self.current.get_next()
return value
class SCREAMING_SNAKE_CASE__ :
def __init__(self ):
'''simple docstring'''
__a : Optional[Any] = None # First node in list
__a : Union[str, Any] = None # Last node in list
def __str__(self ):
'''simple docstring'''
__a : Tuple = self.head
__a : Any = []
while current is not None:
nodes.append(current.get_data() )
__a : Optional[Any] = current.get_next()
return " ".join(str(__snake_case ) for node in nodes )
def __contains__(self , _lowercase ):
'''simple docstring'''
__a : Optional[int] = self.head
while current:
if current.get_data() == value:
return True
__a : Union[str, Any] = current.get_next()
return False
def __iter__(self ):
'''simple docstring'''
return LinkedListIterator(self.head )
def lowerCAmelCase__(self ):
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase__(self ):
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
if self.head is None:
__a : Optional[int] = node
__a : Dict = node
else:
self.insert_before_node(self.head , __snake_case )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
if self.head is None:
self.set_head(__snake_case )
else:
self.insert_after_node(self.tail , __snake_case )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : List[Any] = Node(__snake_case )
if self.head is None:
self.set_head(__snake_case )
else:
self.set_tail(__snake_case )
def lowerCAmelCase__(self , _lowercase , _lowercase ):
'''simple docstring'''
__a : Any = node
__a : Dict = node.previous
if node.get_previous() is None:
__a : Optional[Any] = node_to_insert
else:
__a : Optional[Any] = node_to_insert
__a : int = node_to_insert
def lowerCAmelCase__(self , _lowercase , _lowercase ):
'''simple docstring'''
__a : Dict = node
__a : Optional[int] = node.next
if node.get_next() is None:
__a : Union[str, Any] = node_to_insert
else:
__a : Dict = node_to_insert
__a : List[str] = node_to_insert
def lowerCAmelCase__(self , _lowercase , _lowercase ):
'''simple docstring'''
__a : Tuple = 1
__a : int = Node(__snake_case )
__a : List[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(__snake_case , __snake_case )
return
current_position += 1
__a : str = node.next
self.insert_after_node(self.tail , __snake_case )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : Dict = self.head
while node:
if node.get_data() == item:
return node
__a : List[Any] = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
if (node := self.get_node(__snake_case )) is not None:
if node == self.head:
__a : Optional[Any] = self.head.get_next()
if node == self.tail:
__a : Optional[Any] = self.tail.get_previous()
self.remove_node_pointers(__snake_case )
@staticmethod
def lowerCAmelCase__(_lowercase ):
'''simple docstring'''
if node.get_next():
__a : Any = node.previous
if node.get_previous():
__a : Any = node.next
__a : Any = None
__a : Tuple = None
def lowerCAmelCase__(self ):
'''simple docstring'''
return self.head is None
def __magic_name__ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
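
# Usage sketch (added for illustration, not part of the original module):
def _demo() -> None:
    linked_list = LinkedList()
    linked_list.insert(1)
    linked_list.insert(2)
    linked_list.insert(3)
    assert str(linked_list) == "1 2 3"  # insert() appends at the tail
    linked_list.insert_at_position(2, 9)  # positions are 1-based
    assert str(linked_list) == "1 9 2 3"
    linked_list.delete_value(9)
    assert 9 not in linked_list
    assert list(linked_list) == [1, 2, 3]  # iteration walks head to tail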
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
lowercase__ = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import qiskit


def half_adder(bit0: int, bit1: int):
    """Build and simulate a quantum half adder for two classical input bits."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
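
# Worked example (added for illustration): the 3-vertex graph from the comments
# above, built directly instead of via input().
def _example() -> None:
    inf = float("inf")
    demo_graph = [
        [0.0, inf, inf],
        [inf, 0.0, 2.0],
        [inf, 1.0, 0.0],
    ]
    dist, _ = floyd_warshall(demo_graph, 3)
    assert dist[1][2] == 2.0 and dist[2][1] == 1.0
    assert dist[0][1] == inf  # vertex 0 has no path to the others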
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# NOTE: class and function names in this file were obfuscated in the source and
# have been reconstructed descriptively.
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class FixedRegressionModel(torch.nn.Module):
    # Variant whose parameters are fixed length-2 tensors; only the first entry is used.
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size=16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
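
# Usage sketch (added for illustration): fitting the scalar model to the
# synthetic regression data with plain PyTorch, no `accelerate` involved. The
# learned parameters should approach the generating values a=2, b=3.
def _train_demo(steps: int = 200) -> None:
    dataset = RegressionDataset(a=2, b=3, length=64, seed=0)
    model = RegressionModel(a=0.0, b=0.0)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    loader = DataLoader(dataset, batch_size=16, shuffle=True)
    for _ in range(steps):
        for batch in loader:
            optimizer.zero_grad()
            loss = ((model(batch["x"]) - batch["y"]) ** 2).mean()
            loss.backward()
            optimizer.step()
    print(float(model.a), float(model.b))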
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6)."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
from collections import defaultdict

import yaml

PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Clean one table-of-content section: deduplicate entries, keep the
    "Overview" page first, and sort the rest alphabetically by title."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
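
# Usage sketch (added for illustration): exercising the attribute aliases on a
# smaller-than-default configuration.
def _demo_config() -> None:
    config = ConditionalDetrConfig(encoder_layers=2, decoder_layers=2, d_model=128, encoder_attention_heads=4)
    assert config.hidden_size == 128  # alias for `d_model`
    assert config.num_attention_heads == 4  # alias for `encoder_attention_heads`
    assert config.to_dict()["model_type"] == "conditional_detr"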
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
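
# Usage sketch (added for illustration): the `_LazyModule` above means consumers
# never import this package directly; the heavy modeling module is only loaded
# on first attribute access, e.g.:
#
#   from transformers import EncoderDecoderModel
#   model = EncoderDecoderModel.from_encoder_decoder_pretrained(
#       "bert-base-uncased", "bert-base-uncased"
#   )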
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    prime = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            prime = i
            n //= i
        i += 1
    if n > 1:
        prime = n
    return int(prime)


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000) -> int:
    """Return the sum of all numbers below n that are palindromic in both
    base 10 and base 2 (Project Euler problem 36)."""
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
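    # Sanity check (added): 585 = 0b1001001001 reads the same forwards and
    # backwards in both base 10 and base 2.
    assert is_palindrome(585) and is_palindrome(bin(585)[2:])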
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging

if is_torch_available():
    import torch

logger = logging.get_logger(__name__)


# NOTE: the original class name was obfuscated in the source; a descriptive
# placeholder name is used here.
class SimpleImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
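
# Usage sketch (added for illustration; `SimpleImageProcessor` is a placeholder
# name for the obfuscated original class):
def _demo_preprocess() -> None:
    rng = np.random.default_rng(0)
    image = rng.integers(0, 256, size=(480, 640, 3), dtype=np.uint8)
    processor = SimpleImageProcessor()
    batch = processor.preprocess(image, return_tensors="np")
    # resized to shortest_edge=256, center-cropped to 224x224, channels-first
    assert batch["pixel_values"].shape == (1, 3, 224, 224)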
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
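# A minimal usage sketch of the public API re-exported above (an illustrative
# addition, not part of the original module); the "squad" dataset id is just an
# example.
#
#     from datasets import load_dataset
#     ds = load_dataset("squad", split="train")
#     print(ds[0])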
| 585
| 0
|
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_ ):
    return EnvironmentCommand()
def download_command_factory(args ):
    return EnvironmentCommand(args.accelerate_config_file )
class EnvironmentCommand(BaseTransformersCLICommand ):
    '''simple docstring'''
    @staticmethod
    def register_subcommand(parser ):
        download_parser = parser.add_parser('''env''' )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            '''--accelerate-config_file''' , default=None , help='''The accelerate config file to use for the default values in the launching script.''' , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args ):
        self._accelerate_config_file = accelerate_config_file
    def run( self ):
        safetensors_version = '''not installed'''
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec('''safetensors''' ) is not None:
            import safetensors
            safetensors_version = F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
        accelerate_version = '''not installed'''
        accelerate_config = accelerate_config_str = '''not found'''
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
                accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
            accelerate_config_str = (
                '''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
                if isinstance(accelerate_config , dict )
                else F"""\t{accelerate_config}"""
            )
        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = '''not installed'''
        tf_cuda_available = '''NA'''
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices('''GPU''' ) )
        flax_version = '''not installed'''
        jax_version = '''not installed'''
        jaxlib_version = '''not installed'''
        jax_backend = '''NA'''
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
            '''`transformers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''Huggingface_hub version''': huggingface_hub.__version__,
            '''Safetensors version''': F"""{safetensors_version}""",
            '''Accelerate version''': F"""{accelerate_version}""",
            '''Accelerate config''': F"""{accelerate_config_str}""",
            '''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""",
            '''Tensorflow version (GPU?)''': F"""{tf_version} ({tf_cuda_available})""",
            '''Flax version (CPU?/GPU?/TPU?)''': F"""{flax_version} ({jax_backend})""",
            '''Jax version''': F"""{jax_version}""",
            '''JaxLib version''': F"""{jaxlib_version}""",
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }
        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
        print(self.format_dict(info ) )
        return info
    @staticmethod
    def format_dict(d ):
        return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 700
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args ):
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip('''/''' )
    target_model_path = args.target_model_path
    print(F"""Load fine-pruned model from {model_name_or_path}""" )
    model = torch.load(os.path.join(model_name_or_path , '''pytorch_model.bin''' ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""" )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""" )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""" )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""" )
            else:
                raise ValueError('''Unknown pruning method''' )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , F"""bertarized_{os.path.basename(model_name_or_path )}""" )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(F"""\nCreated folder {target_model_path}""" )
    torch.save(pruned_model , os.path.join(target_model_path , '''pytorch_model.bin''' ) )
    print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()
main(args)
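# Example invocation (illustrative only; the script file name and the model
# path are placeholders, not part of the original file):
#
#     python bertarize.py \
#         --pruning_method sigmoied_threshold \
#         --threshold 0.1 \
#         --model_name_or_path ./serialization_dir/fine_pruned_model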
| 33
| 0
|
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker :
"""simple docstring"""
    def __init__( self ):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("""bits""" , self.watermark )
    def apply_watermark( self , images : torch.FloatTensor ):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , """dwtDct""" ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
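# Illustrative usage sketch (not part of the original file): `apply_watermark`
# expects NCHW float images in [-1, 1] and returns them re-encoded with the
# invisible DWT-DCT watermark; inputs narrower than 256 px pass through.
#
#     watermarker = StableDiffusionXLWatermarker()
#     fake_batch = torch.rand(2, 3, 512, 512) * 2 - 1
#     watermarked = watermarker.apply_watermark(fake_batch)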
| 63
|
'''simple docstring'''
import math
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1 ) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
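# Quick sanity checks (illustrative, not in the original file):
#
#     >>> is_prime(5), is_prime(9), is_prime(97)
#     (True, False, True)
#
# solution(0.1) answers Project Euler 58: the side length of the square spiral
# at which the ratio of primes along both diagonals first drops below 10%.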
if __name__ == "__main__":
import doctest
doctest.testmod()
| 692
| 0
|
from __future__ import annotations
def ohms_law(voltage: float , current: float , resistance: float ) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 530
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 10_00 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_padding( self , max_length=15 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="max_length" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="max_length" , )
def A_ ( self ):
pass
    def test_full_tokenizer( self ):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2_85, 46, 10, 1_70, 3_82] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer( self ):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
    def test_tokenization_base_easy_symbols( self ):
        symbols = "Hello World!"
        original_tokenizer_encodings = [1_26, 32, 2_62, 1_52, 38, 72, 2_87]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
        symbols = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
        original_tokenizer_encodings = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model( self ):
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = " ".join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_tokenizer_integration( self ):
# fmt: off
snake_case__ = {"input_ids": [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=False , sequences=sequences , )
| 530
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester :
'''simple docstring'''
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=128, max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
            choice_labels = ids_tensor([self.batch_size], self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return NezhaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = NezhaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids )
        result = model(input_ids, token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
    def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        '''simple docstring'''
        config.add_cross_attention = True
        model = NezhaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = NezhaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = NezhaForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2) )
    def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = NezhaForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2) )
    def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        model = NezhaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class NezhaModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': NezhaModel,
'''fill-mask''': NezhaForMaskedLM,
'''question-answering''': NezhaForQuestionAnswering,
'''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification,
'''zero-shot''': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fp16 = True
    def _prepare_for_class( self, inputs_dict, model_class, return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device )
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device )
return inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37 )
    def test_config( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_as_decoder( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
    def test_model_as_decoder_with_default_input_mask( self ):
        '''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, )
    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@slow
@require_torch_gpu
    def test_torchscript_device_change( self ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict, model_class )
            traced_model = torch.jit.trace(
                model, (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, 'bert.pt' ) )
                loaded = torch.jit.load(os.path.join(tmp, 'bert.pt' ), map_location=torch_device )
                loaded(inputs_dict['input_ids'].to(torch_device ), inputs_dict['attention_mask'].to(torch_device ) )
@require_torch
class NezhaModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_nezha_model( self ):
        '''simple docstring'''
        model = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4 ) )
@slow
    def test_inference_nezha_masked_lm( self ):
        '''simple docstring'''
        model = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 21_128) )
        self.assertEqual(output.shape, expected_shape )
        expected_slice = torch.tensor(
            [[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4 ) )
| 28
|
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow :
    '''simple docstring'''
    def __init__( self, device = "cpu", clip_model = "openai/clip-vit-large-patch14" ):
        '''simple docstring'''
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model )
        self.image_mean = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
        self.image_std = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std )
        self.resize = torchvision.transforms.Resize(224 )
        self.center_crop = torchvision.transforms.CenterCrop(224 )
    def preprocess_img( self, images ):
        '''simple docstring'''
        images = self.resize(images )
        images = self.center_crop(images )
        images = self.normalize(images )
        return images
    def __call__( self, text=None, images=None, **kwargs ):
        '''simple docstring'''
        encoding = self.tokenizer(text=text, **kwargs )
        encoding["pixel_values"] = self.preprocess_img(images )
        encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP ( nn.Module ):
'''simple docstring'''
    def __init__( self, iterations=10, lr=0.01, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None, clip_preprocessor=None, device=None, log=False, save_vector=True, return_val="image", quantize=True, save_intermediate=False, show_intermediate=False, make_grid=False, ):
        '''simple docstring'''
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint )
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
        self.clip.to(self.device )
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device )
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def create_animation( self, input_path=None, output_path=None, total_duration=5, extend_frames=True ):
        '''simple docstring'''
        images = []
        if output_path is None:
            output_path = './animation.gif'
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + '/*' ) )
        if not len(paths ):
            raise ValueError(
                'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
                ' function?)' )
        if len(paths ) == 1:
            print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
        frame_duration = total_duration / len(paths )
        durations = [frame_duration] * len(paths )
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith('.png' ):
                images.append(imageio.imread(file_name ) )
        imageio.mimsave(output_path, images, duration=durations )
        print(F"gif saved to {output_path}" )
    def _get_latent( self, path=None, img=None ):
        '''simple docstring'''
        if not (path or img):
            raise ValueError('Input either path or tensor' )
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path ), target_image_size=256 ).to(self.device )
        x_processed = preprocess_vqgan(x )
        z, *_ = self.vqgan.encode(x_processed )
        return z
    def _add_vector( self, transform_vector ):
        '''simple docstring'''
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent )
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q )
    def _get_clip_similarity( self, prompts, image, weights=None ):
        '''simple docstring'''
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors='pt', padding=True )
        clip_outputs = self.clip(**clip_inputs )
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss( self, pos_prompts, neg_prompts, image ):
        '''simple docstring'''
        pos_logits = self._get_clip_similarity(pos_prompts['prompts'], image, weights=(1 / pos_prompts['weights']) )
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts['prompts'], image, weights=neg_prompts['weights'] )
        else:
            neg_logits = torch.tensor([1], device=self.device )
        loss = -torch.log(pos_logits ) + torch.log(neg_logits )
        return loss
    def _optimize_CLIP( self, original_img, pos_prompts, neg_prompts ):
        '''simple docstring'''
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device )
        optim = torch.optim.Adam([vector], lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            transformed_img = self._add_vector(vector )
            processed_img = loop_post_process(transformed_img )
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img )
            print('CLIP loss', clip_loss )
            if self.log:
                wandb.log({'CLIP Loss': clip_loss} )
            clip_loss.backward(retain_graph=True )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    def _init_logging( self, positive_prompts, negative_prompts, image_path ):
        '''simple docstring'''
        wandb.init(reinit=True, project='face-editor' )
        wandb.config.update({'Positive Prompts': positive_prompts} )
        wandb.config.update({'Negative Prompts': negative_prompts} )
        wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
        if image_path:
            image = Image.open(image_path )
            image = image.resize((256, 256) )
            wandb.log('Original Image', wandb.Image(image ) )
    def process_prompts( self, prompts ):
        '''simple docstring'''
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str ):
            prompts = [prompt.strip() for prompt in prompts.split('|' )]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt , weight = prompt.split(':' )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device ),
        }
    def generate( self, pos_prompts, neg_prompts=None, image_path=None, show_intermediate=True, save_intermediate=False, show_final=True, save_final=True, save_path=None, ):
        '''simple docstring'''
        if image_path:
            self.latent = self._get_latent(image_path )
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device )
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path )
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts )
        neg_prompts = self.process_prompts(neg_prompts )
        if save_final and save_path is None:
            save_path = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'] ) )
            if not os.path.exists(save_path ):
                os.makedirs(save_path )
            else:
                save_path = save_path + '_' + get_timestamp()
                os.makedirs(save_path )
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print('Original Image' )
            show_pil(custom_to_pil(original_img ) )
        original_img = loop_post_process(original_img )
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts ) ):
            if show_intermediate:
                show_pil(transformed_img )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}.png" ) )
            if self.log:
                wandb.log({'Image': wandb.Image(transformed_img )} )
        if show_final:
            show_pil(transformed_img )
        if save_final:
            transformed_img.save(os.path.join(self.save_path, F"iter_{iter:03d}_final.png" ) )
| 28
| 1
|
'''simple docstring'''
g = 9.80665
def archimedes_principle(fluid_density: float , volume: float , gravity: float = g ) -> float:
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''' )
if volume < 0:
raise ValueError('''Impossible Object volume''' )
if gravity <= 0:
raise ValueError('''Impossible Gravity''' )
return fluid_density * gravity * volume
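# Worked example (illustrative, not in the original file): a 0.5 m^3 object
# fully submerged in water (density ~1000 kg/m^3) displaces
# 1000 * 9.80665 * 0.5 = 4903.325 N of buoyant force.
#
#     >>> archimedes_principle(fluid_density=1000, volume=0.5)
#     4903.325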
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 599
|
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang: str = "ro" , tgt_lang: str = "en" , dataset: str = "wmt16" , save_dir=None ) -> None:
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError('''run pip install datasets''' )
    pair = f"""{src_lang}-{tgt_lang}"""
    print(f"""Converting {dataset}-{pair}""" )
    ds = datasets.load_dataset(dataset , pair )
    if save_dir is None:
        save_dir = f"""{dataset}-{pair}"""
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )
    for split in ds.keys():
        print(f"""Splitting {split} with {ds[split].num_rows} records""" )
        # to save to val.source, val.target like summary datasets
        fn = '''val''' if split == '''validation''' else split
        src_path = save_dir.joinpath(f"""{fn}.source""" )
        tgt_path = save_dir.joinpath(f"""{fn}.target""" )
        src_fp = src_path.open('''w+''' )
        tgt_fp = tgt_path.open('''w+''' )
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x['''translation''']
            src_fp.write(ex[src_lang] + '''\n''' )
            tgt_fp.write(ex[tgt_lang] + '''\n''' )
    print(f"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
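# Example invocation via fire (illustrative; the script file name is an
# assumption, not part of the original file):
#
#     python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16
#
# which writes {split}.source / {split}.target files under ./wmt16-ro-en.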
| 599
| 1
|
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 200_0000 ) -> int:
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
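# Worked check (illustrative, not in the original file): an m x n grid contains
# T(m) * T(n) rectangles, where T(k) = k * (k + 1) / 2 is the k-th triangle
# number. A 2 x 3 grid therefore contains T(2) * T(3) = 3 * 6 = 18 rectangles,
# and solution() searches for the grid whose count is nearest two million.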
if __name__ == "__main__":
print(F'''{solution() = }''')
| 654
|
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , embedding_size=16 , hidden_size=36 , num_hidden_layers=6 , num_hidden_groups=6 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __UpperCAmelCase ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any ):
lowerCamelCase__ = AlbertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
lowerCamelCase__ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __UpperCAmelCase ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCamelCase__ = AlbertForPreTraining(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase__ = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , sentence_order_label=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
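    # Illustrative aside (not part of the original test): multiple-choice heads expect
    # inputs of shape (batch_size, num_choices, seq_length); the unsqueeze/expand calls
    # above turn flat (batch_size, seq_length) tensors into that layout, e.g.
    #     ids = torch.arange(6).reshape(2, 3)        # (batch=2, seq=3)
    #     ids.unsqueeze(1).expand(-1, 4, -1).shape   # torch.Size([2, 4, 3])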
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
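# Informal note: integration checks like the one above compare only a small logits
# slice at a loose tolerance (atol=1e-4), which tolerates minor numerical differences
# across hardware while still catching weight-conversion or architecture mistakes.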
| 129
| 0
|
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DistilBertModel,
            'fill-mask': DistilBertForMaskedLM,
            'question-answering': DistilBertForQuestionAnswering,
            'text-classification': DistilBertForSequenceClassification,
            'token-classification': DistilBertForTokenClassification,
            'zero-shot': DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 714
|
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[F"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
    o = params[F"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
    q = params[F"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
    v = params[F"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[F"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
        wi_1 = params[F"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
        wi = (wi_0, wi_1)
    else:
        wi = params[F"""{prefix}/layers_{i}/mlp/wi/kernel"""]
    wo = params[F"""{prefix}/layers_{i}/mlp/wo/kernel"""]
    return wi, wo
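# Background note (informal): T5 v1.1 checkpoints use a gated activation in the MLP and
# therefore store two input kernels; conceptually the feed-forward computes
#     h = act(x @ wi_0) * (x @ wi_1);  y = h @ wo
# whereas v1.0 checkpoints store a single `wi` kernel, hence the two shapes returned above.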
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    return params[F"""{prefix}/layers_{i}/{layer_name}/scale"""]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[F"""encoder.block.{i}.layer.0.layer_norm.weight"""] = layer_norm
        new[F"""encoder.block.{i}.layer.0.SelfAttention.k.weight"""] = k.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.o.weight"""] = o.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.q.weight"""] = q.T
        new[F"""encoder.block.{i}.layer.0.SelfAttention.v.weight"""] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[F"""encoder.block.{i}.layer.1.layer_norm.weight"""] = layer_norm
        if split_mlp_wi:
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"""] = wi[0].T
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"""] = wi[1].T
        else:
            new[F"""encoder.block.{i}.layer.1.DenseReluDense.wi.weight"""] = wi.T
        new[F"""encoder.block.{i}.layer.1.DenseReluDense.wo.weight"""] = wo.T
    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[F"""decoder.block.{i}.layer.0.layer_norm.weight"""] = layer_norm
            new[F"""decoder.block.{i}.layer.0.SelfAttention.k.weight"""] = k.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.o.weight"""] = o.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.q.weight"""] = q.T
            new[F"""decoder.block.{i}.layer.0.SelfAttention.v.weight"""] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[F"""decoder.block.{i}.layer.1.layer_norm.weight"""] = layer_norm
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.k.weight"""] = k.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.o.weight"""] = o.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.q.weight"""] = q.T
            new[F"""decoder.block.{i}.layer.1.EncDecAttention.v.weight"""] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[F"""decoder.block.{i}.layer.2.layer_norm.weight"""] = layer_norm
            if split_mlp_wi:
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"""] = wi[0].T
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"""] = wi[1].T
            else:
                new[F"""decoder.block.{i}.layer.2.DenseReluDense.wi.weight"""] = wi.T
            new[F"""decoder.block.{i}.layer.2.DenseReluDense.wo.weight"""] = wo.T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T
    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Replaces the params in the model with the converted T5X params."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted_params = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted_params, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    config = TaConfig.from_json_file(config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config)
    else:
        model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
        '''--is_encoder_only''', action='''store_true''', help='''Set this flag if the model is an encoder-only (no decoder) model.''', default=False
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
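# Example invocation (script name and paths are placeholders):
#     python convert_t5x_checkpoint_to_pytorch.py \
#         --t5x_checkpoint_path /path/to/t5x_checkpoint \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_model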
| 298
| 0
|
"""simple docstring"""
from __future__ import annotations
class Node:
    """A binary tree node holding an integer value."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree):  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree):
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree):
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main():  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
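# Shape of the tree constructed in main() above: nodes 3, 5 and 8 each have a single
# child, so is_full_binary_tree(tree) prints False, and the deepest path 1-3-7-8-9
# gives depth_of_tree(tree) == 5.
#           1
#          / \
#         2   3
#        / \  /
#       4  5 7
#          | |
#          6 8
#             \
#              9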
if __name__ == "__main__":
main()
| 82
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
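# Minimal usage sketch (informal; assumes a mono waveform array at the sampling rate
# expected by the Whisper processor, and the encode -> forward -> decode flow that the
# PipelineTool base class drives when the tool is called):
#     tool = SpeechToTextTool()
#     transcript = tool(audio)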
| 154
| 0
|
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    """Wrapper around multiple `ControlNetModel` instances; `forward()` mirrors the single-net API."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]) -> None:
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + F'''_{idx}'''

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + F'''_{idx}'''
        logger.info(F'''{len(controlnets)} controlnets loaded from {pretrained_model_path}.''')
        if len(controlnets) == 0:
            raise ValueError(
                F'''No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + "_0"}.''' )
        return cls(controlnets)
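# Minimal usage sketch (checkpoint paths are placeholders):
#     nets = [ControlNetModel.from_pretrained(p) for p in ("ctrl_canny", "ctrl_pose")]
#     multi = MultiControlNetModel(nets)
#     # forward() runs each net on its own conditioning image/scale and sums the
#     # resulting down/mid residuals before they are fed to the UNet.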
| 286
|
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 286
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1_024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
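# Informal note: both checks above compare only the last feature dimension per token
# (output[:, :, -1]) against values exported from the original fairseq models (see the
# commented torch.hub lines), using a relatively loose atol=1e-3.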
| 287
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
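# Informal note: the try/except blocks in this module implement the "soft dependency"
# pattern -- probe for optional packages (and minimum versions) and, when one is
# missing, export importable dummy objects that raise a helpful error only on use.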
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('''>=''', '''0.0.12''')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
    @flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 287
| 1
|
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
_lowerCamelCase : Optional[int] = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = """esm"""

    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_026 , initializer_range=0.02 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("""No esmfold_config supplied for folding model, using default values.""" )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , False ):
            raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output["""esmfold_config"""] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self )
        output["""trunk"""] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1_024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                f''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                f''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                """`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"""
                f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                """`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"""
                f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
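        # Worked example of the checks above: with sequence_state_dim = 1024 and
        # sequence_head_width = 32, sequence_num_heads = 1024 // 32 = 32 and
        # 32 * 32 == 1024, so the configuration is accepted.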
    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self )
        output["""structure_module"""] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self )
def get_default_vocab_list():
    """Returns the default ESM-2 vocabulary as a tuple of tokens."""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 512
|
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f'''{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}'''
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
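# Worked example (informal): for "hello" vs "world" only "l" matches within the window
# (match_count = 1, no transpositions), so jaro = (1/5 + 1/5 + 1/1) / 3 ~= 0.467; the
# strings share no common prefix, so the Winkler correction leaves the score unchanged.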
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
| 512
| 1
|
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
    args = parser.parse_args()

    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
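# Example invocation (script name and dump path are placeholders):
#     python convert_unclip_txt2img_to_image_variation.py \
#         --dump_path ./karlo-image-variations \
#         --txt2img_unclip kakaobrain/karlo-v1-alpha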
| 48
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__( self , parent , out_indices=None , out_features=None , stage_names=None , backbone="resnet50" , batch_size=3 , image_size=3_2 , num_channels=3 , is_training=True , use_pretrained_backbone=True , ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False)
def A( self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A( self):
__UpperCAmelCase : int = '''resnet18'''
__UpperCAmelCase : List[str] = '''microsoft/resnet-18'''
__UpperCAmelCase : Any = AutoBackbone.from_pretrained(lowercase__ , use_timm_backbone=lowercase__)
__UpperCAmelCase : Union[str, Any] = AutoBackbone.from_pretrained(lowercase__)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names))
self.assertEqual(timm_model.channels , transformers_model.channels)
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,))
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1])
__UpperCAmelCase : Union[str, Any] = AutoBackbone.from_pretrained(lowercase__ , use_timm_backbone=lowercase__ , out_indices=[1, 2, 3])
__UpperCAmelCase : Any = AutoBackbone.from_pretrained(lowercase__ , out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices)
self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features))
self.assertEqual(timm_model.channels , transformers_model.channels)
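        # Illustrative note: out_indices=(-1,) and out_indices=[len(stage_names) - 1]
        # refer to the same (last) stage; the two libraries simply normalize the default
        # differently, which is what the assertions above pin down.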
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''')
def A( self):
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''')
def A( self):
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''')
def A( self):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''')
def A( self):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''')
def A( self):
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''')
def A( self):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''')
def A( self):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''')
def A( self):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''')
def A( self):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''')
def A( self):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''')
def A( self):
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''')
def A( self):
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''')
def A( self):
pass
@unittest.skip('''Safetensors is not supported by timm.''')
def A( self):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def A( self):
pass
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Any = model_class(lowercase__)
__UpperCAmelCase : Optional[int] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Optional[int] = [*signature.parameters.keys()]
__UpperCAmelCase : Dict = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Optional[int] = self.has_attentions
# no need to test all models as different heads yield the same functionality
__UpperCAmelCase : Optional[Any] = self.all_model_classes[0]
__UpperCAmelCase : Optional[int] = model_class(lowercase__)
model.to(lowercase__)
__UpperCAmelCase : Tuple = self._prepare_for_class(lowercase__ , lowercase__)
__UpperCAmelCase : Optional[int] = model(**lowercase__)
__UpperCAmelCase : List[str] = outputs[0][-1]
# Encoder-/Decoder-only models
__UpperCAmelCase : Dict = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__UpperCAmelCase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowercase__)
self.assertIsNotNone(hidden_states.grad)
if self.has_attentions:
self.assertIsNotNone(attentions.grad)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[Any] = model_class(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Union[str, Any] = model(**lowercase__)
self.assertEqual(len(result.feature_maps) , len(config.out_indices))
self.assertEqual(len(model.channels) , len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
__UpperCAmelCase : List[str] = copy.deepcopy(lowercase__)
__UpperCAmelCase : str = None
__UpperCAmelCase : Optional[int] = model_class(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(**lowercase__)
self.assertEqual(len(result.feature_maps) , 1)
self.assertEqual(len(model.channels) , 1)
# Check backbone can be initialized with fresh weights
__UpperCAmelCase : Tuple = copy.deepcopy(lowercase__)
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Optional[int] = model_class(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(**lowercase__)
| 462
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
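
# Minimal usage sketch (an illustration, not part of the original module): it
# instantiates the config defined above with one field overridden. The
# `TimesformerConfig` name refers to the class in this file.
if __name__ == "__main__":
    config = TimesformerConfig(num_frames=16)
    print(config.attention_type)  # -> "divided_space_time"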
| 428
|
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
    o = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
    q = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
    v = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
        wi_1 = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""]

    wo = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from a T5X/Flax checkpoint into PyTorch state-dict keys."""
    old = traverse_util.flatten_dict(variables['''target'''])
    old = {'''/'''.join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/layers_0/mlp/wi_0/kernel''' in old
    print('''Split MLP:''', split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new['''shared.weight'''] = old['''token_embedder/embedding''']

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, '''encoder''', '''pre_attention_layer_norm''')
        k, o, q, v = t5x_attention_lookup(old, i, '''encoder''', '''attention''')
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, '''encoder''', '''pre_mlp_layer_norm''')
        wi, wo = t5x_mlp_lookup(old, i, '''encoder''', split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new['''encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = old[
        '''encoder/relpos_bias/rel_embedding'''
    ].T
    new['''encoder.final_layer_norm.weight'''] = old['''encoder/encoder_norm/scale''']

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, '''decoder''', '''pre_self_attention_layer_norm''')
            k, o, q, v = t5x_attention_lookup(old, i, '''decoder''', '''self_attention''')
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, '''decoder''', '''pre_cross_attention_layer_norm''')
            k, o, q, v = t5x_attention_lookup(old, i, '''decoder''', '''encoder_decoder_attention''')
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, '''decoder''', '''pre_mlp_layer_norm''')
            wi, wo = t5x_mlp_lookup(old, i, '''decoder''', split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new['''decoder.final_layer_norm.weight'''] = old['''decoder/decoder_norm/scale''']
        new['''decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = old[
            '''decoder/relpos_bias/rel_embedding'''
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new['''lm_head.weight'''] = old['''decoder/logits_dense/kernel'''].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict of torch tensors for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['''encoder.embed_tokens.weight'''] = state_dict['''shared.weight''']

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['''decoder.embed_tokens.weight'''] = state_dict['''shared.weight''']

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''')
            state_dict['''lm_head.weight'''] = state_dict['''shared.weight''']

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in the model with the converted T5X params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('''Done''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
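
# Hypothetical invocation of the script above (an illustration, not part of the
# original file): the flags are the ones defined by the argparse parser, but
# the file name and paths are made up.
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output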
| 428
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
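
# Sketch of what the lazy-import pattern above buys (assumed usage, not part of
# the original module; the exact package path is an assumption): importing the
# package is cheap, and the heavy torch-backed symbols are only resolved on
# first attribute access.
#
#   import transformers.models.deprecated.mmbt as mmbt  # fast, torch not imported yet
#   config = mmbt.MMBTConfig()                           # triggers the real import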
| 0
|
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_cli.py'])
    base_cmd = ['accelerate', 'launch']
    config_folder = Path.home() / '.cache/huggingface/accelerate'
    config_file = 'default_config.yaml'
    config_path = config_folder / config_file
    changed_path = config_folder / '_default_config.yaml'
    test_config_path = Path('tests/test_configs')

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    tpu_name = 'test-tpu'
    tpu_zone = 'us-central1-a'
    command = 'ls'
    cmd = ['accelerate', 'tpu-config']
    base_output = 'cd /usr/share'
    command_file = 'tests/test_samples/test_command_file.sh'
    gcloud = 'Running gcloud compute tpus tpu-vm ssh'

    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                'echo "Hello World"',
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""",
            output,
        )

    def test_with_config_file_and_command_file(self):
        output = run_command(
            self.cmd
            + ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command_file",
                self.command_file,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_accelerate_install(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )

    def test_accelerate_install_version(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--install_accelerate",
                "--accelerate_version",
                "12.0.0",
                "--debug",
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""",
            output,
        )
| 620
| 0
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
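
# Worked example of the toy BPE above (illustrative): with merges `r e`, `a d`
# and `ad apt</w>`, the word "readapt" first becomes `re` + `adapt` and
# tokenizes to `re@@ adapt`, while "react" only benefits from `r e` and falls
# back to `re@@ a@@ c@@ t` — exactly the sequence asserted in
# test_full_tokenizer.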
| 721
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ['''integration''', '''unit''']):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line('''markers''', '''torchaudio_latest: mark test to run with torchaudio>=0.12''')


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / '''cache'''
    test_hf_datasets_cache = test_hf_cache_home / '''datasets'''
    test_hf_metrics_cache = test_hf_cache_home / '''metrics'''
    test_hf_modules_cache = test_hf_cache_home / '''modules'''
    monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''', str(test_hf_datasets_cache))
    monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''', str(test_hf_metrics_cache))
    monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''', str(test_hf_modules_cache))

    test_downloaded_datasets_path = test_hf_datasets_cache / '''downloads'''
    monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''', str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / '''downloads''' / '''extracted'''
    monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''', str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope='''session''')
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''', False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''', True)
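
# Sketch of how a test opts into the one non-autouse fixture above (assumed
# usage; the test body is illustrative only):
#
#   def test_dataset_to_sql(set_sqlalchemy_silence_uber_warning, tmp_path):
#       ...  # SQLAlchemy-backed code runs without RemovedIn20Warning noise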
| 613
| 0
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """Text-to-image pipeline that reuses a fixed-size reference latent so images
    generated with the same seed but different resolutions stay similar."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(prompt)}""")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps)}.""")

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""")
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"""`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="""
                    f""" {type(prompt)}.""")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
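
# Hedged usage sketch (assumptions: this file is loaded as a diffusers
# community pipeline and the custom_pipeline name matches this module; the
# checkpoint name is illustrative only):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="seed_resize_stable_diffusion"
#   )
#   image = pipe(prompt="a photo of an astronaut", height=512, width=768).images[0]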
| 199
|
'''simple docstring'''
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
SCREAMING_SNAKE_CASE = int(input("""Enter number: """).strip())
print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
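
# Worked check (illustrative): 6 is perfect because its proper divisors
# 1 + 2 + 3 sum to 6; the next perfect numbers are 28, 496 and 8128.
#
#   >>> [n for n in range(1, 500) if perfect(n)]
#   [6, 28, 496]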
| 199
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 712
|
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(tree: TreeNode | None) -> bool:
    """Checks that every node holds float data and that the tree satisfies the
    binary-search-tree ordering property."""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(tree, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
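
# Small illustrative check (not part of the original module): a valid BST and
# an invalid one built from the TreeNode dataclass above.
#
#   >>> root = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
#   >>> is_binary_search_tree(root)
#   True
#   >>> is_binary_search_tree(TreeNode(2.0, TreeNode(5.0), TreeNode(3.0)))
#   False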
| 614
| 0
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort('''probability''', reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(['''label''', '''probability'''])
    dataset = dataset.rename_column('''prediction''', '''label''')
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f'''train_pseudo.{args.data_file_extension}''')
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-training loop: fine-tune, pseudo-label, and iterate."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
        datefmt='''%m/%d/%Y %H:%M:%S''',
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split('''.''')[-1]
        assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.'''

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info('''Creating the initial data directory for self-training...''')
    data_dir_format = f'''{args.output_dir}/self-train_iter-{{}}'''.format
    data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, '''stage-1''')
        arguments_dict = {
            '''accelerator''': accelerator,
            '''model_name_or_path''': args.model_name_or_path,
            '''cache_dir''': args.cache_dir,
            '''do_train''': True,
            '''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
            '''do_eval''': True if args.eval_file is not None else False,
            '''eval_file''': data_files['''eval'''],
            '''do_predict''': True,
            '''infer_file''': data_files['''infer'''],
            '''task_name''': args.task_name,
            '''label_list''': args.label_list,
            '''output_dir''': current_output_dir,
            '''eval_metric''': args.eval_metric,
            '''evaluation_strategy''': args.evaluation_strategy,
            '''early_stopping_patience''': args.early_stopping_patience,
            '''early_stopping_threshold''': args.early_stopping_threshold,
            '''seed''': args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, '''best-checkpoint''', MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                '''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''',
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''', iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info('''Self-training job completed: iteration: %d, stage: 1.''', iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, '''best-checkpoint''')
            current_output_dir = os.path.join(current_data_dir, '''stage-2''')
            # Update arguments_dict
            arguments_dict['''model_name_or_path'''] = model_path
            arguments_dict['''train_file'''] = data_files['''train''']
            arguments_dict['''output_dir'''] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, '''best-checkpoint''', MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    '''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''',
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''', iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info('''Self-training job completed: iteration: %d, stage: 2.''', iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, '''best-checkpoint'''))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, '''eval_results_best-checkpoint.json''')
        test_results_file = os.path.join(current_output_dir, '''test_results_best-checkpoint.json''')
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, '''r''') as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, '''infer_output_best-checkpoint.csv''')
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={'''data''': data_files['''infer''']})['''data''']
        infer_output = load_dataset('''csv''', data_files={'''data''': infer_output_file})['''data''']

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f'''eval_results_iter-{iteration}.json'''))
            if os.path.exists(test_results_file):
                shutil.copy(eval_results_file, os.path.join(output_dir, f'''test_results_iter-{iteration}.json'''))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files['''train_pseudo'''] = os.path.join(next_data_dir, f'''train_pseudo.{args.data_file_extension}''')

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info('''Best iteration: %d''', best_iteration)
        logger.info('''Best evaluation result: %s = %f''', args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f'''eval_results_iter-{iteration}.json'''),
                os.path.join(output_dir, '''eval_results_best-iteration.json'''),
            )
    else:
        # Assume that the last iteration is the best
        logger.info('''Best iteration: %d''', args.max_selftrain_iterations - 1)
        logger.info('''Best evaluation result: %s = %f''', args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json'''),
                os.path.join(output_dir, '''eval_results_best-iteration.json'''),
            )
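
# Hedged usage sketch (paths and task are made up; `selftrain` is the driver
# defined above, and the keyword overrides map onto the ST*Arguments fields):
#
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="data/train.csv",
#       infer_file="data/unlabeled.csv",
#       output_dir="output",
#       eval_file="data/eval.csv",
#       evaluation_strategy="epoch",
#       max_selftrain_iterations=3,
#   )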
| 326
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '''tester'''
        output_text = '''tester'''
        return input_text, output_text

    @unittest.skip('''MGP-STR always lower cases letters.''')
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                special_token = '''[SPECIAL_TOKEN]'''

                tokenizer.add_special_tokens({'''cls_token''': special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}'''):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(''' ''', ''''''), output_text)

    @unittest.skip('''MGP-STR tokenizer only handles one sequence.''')
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''')
    def test_pretokenized_inputs(self):
        pass
| 326
| 1
|
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _snake_case ( __snake_case ):
"""simple docstring"""
a = ["image_processor", "tokenizer"]
a = "AutoImageProcessor"
a = "AutoTokenizer"
def __init__( self : int , _A : str=None , _A : Tuple=None , **_A : Optional[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : int = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _A , )
_SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop("""feature_extractor""")
_SCREAMING_SNAKE_CASE : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""")
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""")
super().__init__(_A , _A)
_SCREAMING_SNAKE_CASE : str = self.image_processor
_SCREAMING_SNAKE_CASE : int = False
def __call__( self : Optional[int] , *_A : Optional[int] , **_A : Optional[int]):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*_A , **_A)
_SCREAMING_SNAKE_CASE : Dict = kwargs.pop("""images""" , _A)
_SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop("""text""" , _A)
if len(_A) > 0:
_SCREAMING_SNAKE_CASE : Dict = args[0]
_SCREAMING_SNAKE_CASE : Any = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""")
if images is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor(_A , *_A , **_A)
if text is not None:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(_A , **_A)
if text is None:
return inputs
elif images is None:
return encodings
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = encodings["""input_ids"""]
return inputs
def _lowerCAmelCase ( self : int , *_A : Tuple , **_A : Dict):
"""simple docstring"""
return self.tokenizer.batch_decode(*_A , **_A)
def _lowerCAmelCase ( self : Tuple , *_A : Optional[Any] , **_A : Any):
"""simple docstring"""
return self.tokenizer.decode(*_A , **_A)
@contextmanager
def _lowerCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""")
_SCREAMING_SNAKE_CASE : str = True
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer
yield
_SCREAMING_SNAKE_CASE : Dict = self.image_processor
_SCREAMING_SNAKE_CASE : Dict = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a (generated) token sequence into an ordered JSON format."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
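# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of how `token2json` parses Donut-style generated sequences.
# The tag names and checkpoint below are assumptions for illustration only.
#
#     processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")  # assumed checkpoint
#     sequence = "<s_menu><s_nm>latte</s_nm><s_price>4.50</s_price></s_menu>"
#     processor.token2json(sequence)
#     # -> {"menu": {"nm": "latte", "price": "4.50"}}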
| 635
|
"""simple docstring"""
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
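# --- Illustrative sketch (not part of the original script) ---
# How StratifiedKFold produces the (train_idxs, valid_idxs) pairs consumed below;
# the toy labels and the n_splits value are arbitrary choices for illustration.
#
#     import numpy as np
#     from sklearn.model_selection import StratifiedKFold
#
#     labels = np.array([0, 0, 0, 1, 1, 1])
#     for train_idx, valid_idx in StratifiedKFold(n_splits=3).split(np.zeros(len(labels)), labels):
#         print(train_idx, valid_idx)  # each fold preserves the 0/1 class ratio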
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    """Build train/validation/test dataloaders for one cross-validation fold."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader


def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 635
| 1
|
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
             SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",",
             SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
             SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
    def test_save_pretrained(self):
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    " Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)
    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self):
        pass
    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")

                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name,
                        additional_special_tokens=added_tokens,
                        **kwargs,
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 256057,
            },
        )
    @require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
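# --- Illustrative note (not part of the original tests) ---
# The `legacy_behaviour` flag toggled above controls where the language code goes;
# exact ids depend on the checkpoint, so treat this purely as a sketch.
#
#     tok = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#     tok("hello").input_ids   # default: [src_lang_code, ..., eos]
#     tok.legacy_behaviour = True
#     tok("hello").input_ids   # legacy:  [..., eos, src_lang_code]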
| 306
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings. Printable bytes map to
    themselves; the rest are shifted into a printable range so BPE never sees
    whitespace or control characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
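# --- Illustrative sketch (not part of the original file) ---
# What the two helpers above compute, on tiny inputs:
#
#     mapping = bytes_to_unicode()
#     mapping[ord("A")]            # 'A'  (printable bytes map to themselves)
#     mapping[0]                   # 'Ā'  (non-printable bytes are shifted up past 255)
#     get_pairs(("l", "o", "w"))   # {('l', 'o'), ('o', 'w')}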
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>",
                 sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
                 add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(self, encoded_inputs, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD,
             pad_to_multiple_of=None, return_attention_mask=None):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
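# --- Illustrative usage (not part of the original file) ---
# How the `_pad` override above keeps `global_attention_mask` aligned. The
# vocab/merges paths are hypothetical; treat this purely as a sketch.
#
#     tok = LEDTokenizer("vocab.json", "merges.txt")
#     enc = tok("long document")
#     enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
#     padded = tok.pad(enc, padding="max_length", max_length=16)
#     # global_attention_mask is extended with -1 (local attention), not 0.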
| 499
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    # Pair leftover "--flag value" tokens into a {flag: value} dict.
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
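# --- Illustrative note (not part of the original file) ---
# `parse_unknown_args` pairs leftover "--flag value" tokens into a kwargs dict:
#
#     parse_unknown_args(["--num_proc", "4", "--cache_dir", "/tmp"])
#     # -> {"num_proc": "4", "cache_dir": "/tmp"}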
| 707
|
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                self.out_projs.append(
                    self.add_weight(
                        shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                    )
                )
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias"
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
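# --- Illustrative note (not part of the original file) ---
# How the cutoffs partition the vocabulary; the numbers below are made up.
# With vocab_size=1000 and cutoffs=[100, 500], cutoff_ends is [0, 100, 500, 1000]:
# the head softmax covers ids 0-99 plus 2 cluster logits, and the two tail
# softmaxes cover ids 100-499 and 500-999, each scored only through the log
# probability the head assigns to its cluster.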
| 485
| 0
|
"""Save a randomly initialized seq2seq model (and its tokenizer) from a pretrained config."""
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized version of a model using a pretrained config."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
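# --- Illustrative invocation (not part of the original file) ---
# `fire` maps positional CLI args onto the function parameters; the model name,
# output path, and config override below are hypothetical.
#
#     python save_randomly_initialized.py t5-small /tmp/t5-random --d_model=128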
| 683
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_partitions():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 455
| 0
|
from math import factorial
def combinations(n: int, k: int) -> int:
    """Compute n choose k, the number of k-element subsets of an n-element set."""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
F"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'If a class of 40 students must be arranged into groups of',
F"4 for group projects, there are {combinations(40, 4)} ways",
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
F"are {combinations(10, 3)} ways that first, second and",
'third place can be awarded.',
)
| 700
|
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute the gamma function by numerically integrating x^(z-1) * e^(-x) over [0, inf)."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
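# --- Illustrative check (not part of the original file) ---
# For positive integers, gamma(n) == (n-1)!, so:
#
#     assert abs(gamma(5) - 24.0) < 1e-6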
| 585
| 0
|
"""simple docstring"""
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
SCREAMING_SNAKE_CASE_ = '''Usage of script: script_name <size_of_canvas:int>'''
SCREAMING_SNAKE_CASE_ = [0] * 100 + [1] * 10
random.shuffle(choice)
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = [[False for i in range(_lowerCAmelCase )] for j in range(_lowerCAmelCase )]
return canvas
def lowercase (_lowerCAmelCase ):
for i, row in enumerate(_lowerCAmelCase ):
for j, _ in enumerate(_lowerCAmelCase ):
__lowerCAmelCase = bool(random.getrandbits(1 ) )
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = np.array(_lowerCAmelCase )
__lowerCAmelCase = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(_lowerCAmelCase ):
for c, pt in enumerate(_lowerCAmelCase ):
__lowerCAmelCase = __judge_point(
_lowerCAmelCase , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
__lowerCAmelCase = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
__lowerCAmelCase = current_canvas.tolist()
return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state
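# Illustrative check (my addition, not part of the original script): a horizontal
# "blinker" oscillator in the middle of a 5x5 canvas turns vertical after one
# generation under the rules implemented above.
_blinker = create_canvas(5)
for _col in (1, 2, 3):
    _blinker[2][_col] = True
_next_gen = run(_blinker)
assert [_next_gen[_row][2] for _row in (1, 2, 3)] == [True, True, True]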
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 465
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE_ = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
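# Note: once the _LazyModule swap above runs, submodules such as modeling_rag
# are only imported when one of their attributes (e.g. RagModel) is first accessed.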
| 465
| 1
|
def solution(limit: int = 1_000_000) -> int:
    """Return the integer sum of Euler's totient phi(n) for 2 <= n <= limit."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
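# Small worked example (my addition): phi(2..8) = 1, 2, 2, 4, 2, 6, 4,
# which sums to 21, so solution(8) == 21.
assert solution(8) == 21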
if __name__ == "__main__":
print(f'''{solution() = }''')
| 343
|
"""Decompress a Lempel-Ziv compressed file back into a stream of bytes."""
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file's bytes and return them as one long string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompress the given string of bits using a Lempel-Ziv lexicon."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
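# Minimal sanity checks (my addition): with the initial lexicon
# {"0": "0", "1": "1"}, a single known code decodes to itself.
assert decompress_data("0") == "0"
assert decompress_data("1") == "1"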
def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given string of bits to the file as whole bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix (leading zeros up to the first '1') from the bits."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, decompress its bit stream and write the result."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 343
| 1
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
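# The denoising pattern the tests above exercise, as a standalone sketch
# (illustrative only; `latents` and `model` stand in for real tensors and a
# real denoising network):
#     scheduler = DPMSolverSDEScheduler()
#     scheduler.set_timesteps(10)
#     for t in scheduler.timesteps:
#         latent_in = scheduler.scale_model_input(latents, t)
#         noise_pred = model(latent_in, t)
#         latents = scheduler.step(noise_pred, t, latents).prev_sample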
| 416
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class a(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Optional[Dict[str, int]] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: Optional[PILImageResampling] = None, do_center_crop: Optional[bool] = None, crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
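# Illustrative usage (my sketch, not part of the library): with the defaults
# above, the shortest edge is resized to 256 and a 224x224 center crop is
# taken, so a single HxWx3 input should come back as a (1, 3, 224, 224) batch.
if __name__ == "__main__":
    image_processor = a()
    dummy_image = np.zeros((300, 400, 3), dtype=np.uint8)
    batch = image_processor(images=dummy_image, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)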
| 416
| 1
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None, subfolder: Optional[str] = None, return_unused_kwargs=False, **kwargs):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    """Create a beta schedule that discretizes the given alpha_bar cosine function."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
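# For example (illustrative): betas_for_alpha_bar(1000) yields a length-1000
# jnp.float32 array following the "squaredcos_cap_v2" cosine schedule used
# below, with every beta clipped to max_beta=0.999.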
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 700
|
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 394
| 0
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694
|
def solution(n: int = 100) -> int:
    """Count the distinct values of a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    current_pow = 0

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
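# Worked example (my addition): for 2 <= a, b <= 5 the powers a**b give
# 15 distinct terms, since 16 = 2**4 = 4**2 collides, so solution(5) == 15.
assert solution(5) == 15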
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
| 348
| 0
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
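# Typical end-user flow mirrored by the slow tests above (sketch only):
#     pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b").to("cuda")
#     frames = pipe("Spiderman is surfing", num_inference_steps=25, output_type="pt").frames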
| 707
|
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)

        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()

        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)
    # any extra models should go into the list here - can be slow
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    # testing with 2 models to validate: 1. translation (t5) and 2. summarization (mbart)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()

        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""
            run_eval_search.py
            {model}
            {str(input_file_name)}
            {str(output_file_name)}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])

        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 655
| 0
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 307
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
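# KwargsHandler.to_kwargs() returns only the fields whose values differ from
# the dataclass defaults, which is exactly what the assertions below exercise.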
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
_snake_case = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
_snake_case = Accelerator(kwargs_handlers=[ddp_scaler])
_snake_case = torch.nn.Linear(100, 200)
_snake_case = accelerator.prepare(model)
# Check the values changed in kwargs
_snake_case = ""
_snake_case = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 307
| 1
|
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
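# Typical usage mirrored by the slow test above (sketch only):
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     result = depth_estimator(image_or_url)
#     result["depth"]            # PIL.Image visualization of the depth map
#     result["predicted_depth"]  # raw torch.Tensor of predicted depths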
| 700
|
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 10**-10 ) ->float:
a__: int = a
while True:
a__: Optional[Any] = Decimal(_SCREAMING_SNAKE_CASE ) - (
Decimal(eval(_SCREAMING_SNAKE_CASE ) ) / Decimal(eval(str(diff(_SCREAMING_SNAKE_CASE ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(_SCREAMING_SNAKE_CASE ) ) < precision: # noqa: S307
return float(_SCREAMING_SNAKE_CASE )
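# Quick illustrative check (my addition): the positive root of x**2 - 4 is 2,
# and Newton-Raphson should land on it from a nearby starting point.
assert abs(newton_raphson("x**2 - 4", 1.5) - 2) < 1e-6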
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
# Find Square Root of 5
print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 217
| 0
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
a_ : List[str] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a_ : List[str] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a_ : int = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a_ : List[Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def lowerCamelCase ( self : Union[str, Any] , a_ : Union[str, Any] , a_ : List[Any] , a_ : List[str] ):
lowerCAmelCase_ : Any = ZeroShotClassificationPipeline(
model=a_ , tokenizer=a_ , candidate_labels=["polics", "health"] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
@require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 1_00 , candidate_labels=["politics", "public health", "science"] )
@require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
} , )
@require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=a_ , )
self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=a_ , )
self.assertEqual(
            nested_simplify(outputs),
            {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.817, 0.713, 0.018, 0.018],
} , )
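# A minimal usage sketch of the pipeline exercised by these tests. The model id
# and the output schema ({"sequence", "labels", "scores"}) match what the tests
# above assert; with multi_label=False the scores are normalized over the
# candidate labels, which is why the tests check they sum to 1.0.
from transformers import pipeline

if __name__ == "__main__":
    classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
    result = classifier(
        "Who are you voting for in 2020?",
        candidate_labels=["politics", "public health", "science"],
        hypothesis_template="This example is about {}.",  # optional custom template
    )
    print(result["labels"][0], result["scores"][0])  # highest-scoring label first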
| 610
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of a pre-softmax logit Tensor, row-wise."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
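# Sanity-check sketch (illustrative, not part of the original file): the closed
# form above equals the Shannon entropy of softmax(x). With A = sum(exp(x_i))
# and B = sum(x_i * exp(x_i)), -sum(p_i * log(p_i)) reduces to log(A) - B / A
# because p_i = exp(x_i) / A and log(p_i) = x_i - log(A).
def _entropy_reference(x):
    """Direct softmax-entropy computation, for comparison with entropy()."""
    p = torch.softmax(x, dim=1)
    return -(p * torch.log(p)).sum(dim=1)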
class DeeBertEncoder(nn.Module):
    """BERT encoder with a highway (early-exit) branch after every layer."""

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
    def set_early_exit_entropy(self, x):
        if isinstance(x, (float, int)):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x
    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()
    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message  # the outputs of the early-exit branch
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A module to provide a shortcut from (the output of one non-final BertLayer in BertEncoder) to
    (cross-entropy computation in BertForSequenceClassification)."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
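# A hedged usage sketch of the early-exit mechanism above (the config/tokenizer
# names are assumptions; the threshold API is the set_early_exit_entropy method
# defined on DeeBertEncoder). A higher entropy threshold exits earlier and saves
# compute; a lower one defers to deeper layers.
#
#   from transformers import BertConfig, BertTokenizer
#
#   config = BertConfig.from_pretrained("bert-base-uncased", num_labels=2)
#   model = DeeBertForSequenceClassification(config)
#   model.bert.encoder.set_early_exit_entropy(0.5)  # one threshold for all layers
#   model.eval()
#
#   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#   inputs = tokenizer("An example sentence.", return_tensors="pt")
#   outputs = model(**inputs)
#   logits = outputs[0]
#   (original_entropy, highway_entropies), exit_layer = outputs[-2], outputs[-1]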
| 610
| 1
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _a ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self : str )->str:
_UpperCAmelCase = 0
def lowercase__ ( self : int )->List[Any]:
_UpperCAmelCase = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Dict )->List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
_UpperCAmelCase = Path(__UpperCamelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : int )->List[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
_UpperCAmelCase = Path(__UpperCamelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : Optional[Any] )->Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = CLIPConfig()
            # Create a dummy config file with image_processor_type
_UpperCAmelCase = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
_UpperCAmelCase = Path(__UpperCamelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_UpperCAmelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
_UpperCAmelCase = CLIPImageProcessor(**__UpperCamelCase )
# save in new folder
model_config.save_pretrained(__UpperCamelCase )
config.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase )
# make sure private variable is not incorrectly saved
_UpperCAmelCase = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : int )->Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def lowercase__ ( self : List[Any] )->Optional[int]:
with self.assertRaisesRegex(
__UpperCamelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
_UpperCAmelCase = AutoImageProcessor.from_pretrained('''clip-base''' )
def lowercase__ ( self : Any )->Optional[int]:
with self.assertRaisesRegex(
__UpperCamelCase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_UpperCAmelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase , revision='''aaaaaa''' )
def lowercase__ ( self : str )->str:
with self.assertRaisesRegex(
__UpperCamelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_UpperCAmelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowercase__ ( self : Optional[int] )->Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase , trust_remote_code=__UpperCamelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def lowercase__ ( self : str )->List[str]:
try:
AutoConfig.register('''custom''' , __UpperCamelCase )
AutoImageProcessor.register(__UpperCamelCase , __UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCamelCase ):
AutoImageProcessor.register(__UpperCamelCase , __UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = Path(__UpperCamelCase ) / '''preprocessor_config.json'''
_UpperCAmelCase = Path(__UpperCamelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__UpperCamelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(__UpperCamelCase , '''w''' ) )
_UpperCAmelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = AutoImageProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self : List[str] )->Union[str, Any]:
class _a ( lowerCAmelCase):
"""simple docstring"""
UpperCamelCase__ = True
try:
AutoConfig.register('''custom''' , __UpperCamelCase )
AutoImageProcessor.register(__UpperCamelCase , __UpperCamelCase )
# If remote code is not set, the default is to use local
_UpperCAmelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_UpperCAmelCase = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(__UpperCamelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
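# A condensed sketch of the register-then-load round trip these tests exercise.
# CustomConfig / CustomImageProcessor are the test helpers imported at the top of
# this file; the register calls mirror the test bodies above.
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   processor = CustomImageProcessor.from_pretrained(some_local_dir)
#   processor.save_pretrained(some_local_dir)
#   reloaded = AutoImageProcessor.from_pretrained(some_local_dir)  # -> CustomImageProcessor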
| 95
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into its YAML front-matter block (if any) and the remaining body."""
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata
    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()
    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)
    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content
    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)
    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
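# An illustrative round trip (content assumed, not from the original file):
#
#   readme = "---\npretty_name: Demo\ntags:\n- demo\n---\n# My dataset"
#   yaml_block, body = _split_yaml_from_readme(readme)
#   # yaml_block == "pretty_name: Demo\ntags:\n- demo", body == "# My dataset"
#   metadata = DatasetMetadata.from_yaml_string(yaml_block)
#   assert metadata["pretty_name"] == "Demo"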
| 95
| 1
|
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
UpperCAmelCase : Any = True
except ImportError:
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def __lowerCamelCase ( lowerCamelCase__ : Namespace ):
'''simple docstring'''
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class __lowercase ( a_ ):
"""simple docstring"""
@staticmethod
def __A ( A ) -> str:
'''simple docstring'''
lowerCamelCase = parser.add_parser("""add-new-model""" )
add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" )
add_new_model_parser.add_argument("""--testing_file""" , type=A , help="""Configuration file on which to run.""" )
add_new_model_parser.add_argument(
"""--path""" , type=A , help="""Path to cookiecutter. Should only be used for testing purposes.""" )
add_new_model_parser.set_defaults(func=A )
def __init__( self , A , A , A=None , *A ) -> List[str]:
'''simple docstring'''
lowerCamelCase = testing
lowerCamelCase = testing_file
lowerCamelCase = path
def __A ( self ) -> str:
'''simple docstring'''
warnings.warn(
"""The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """
"""It is not actively maintained anymore, so might give a result that won't pass all tests and quality """
"""checks, you should use `transformers-cli add-new-model-like` instead.""" )
if not _has_cookiecutter:
raise ImportError(
"""Model creation dependencies are required to use the `add_new_model` command. Install them by running """
"""the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
lowerCamelCase = [directory for directory in os.listdir() if """cookiecutter-template-""" == directory[:22]]
if len(A ) > 0:
raise ValueError(
"""Several directories starting with `cookiecutter-template-` in current working directory. """
"""Please clean your directory by removing all folders starting with `cookiecutter-template-` or """
"""change your working directory.""" )
lowerCamelCase = (
Path(A ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
lowerCamelCase = path_to_transformer_root / """templates""" / """adding_a_new_model"""
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A ) )
else:
with open(self._testing_file , """r""" ) as configuration_file:
lowerCamelCase = json.load(A )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=A , extra_context=A , )
lowerCamelCase = [directory for directory in os.listdir() if """cookiecutter-template-""" in directory[:22]][0]
# Retrieve configuration
with open(directory + """/configuration.json""" , """r""" ) as configuration_file:
lowerCamelCase = json.load(A )
lowerCamelCase = configuration["""lowercase_modelname"""]
lowerCamelCase = configuration["""generate_tensorflow_pytorch_and_flax"""]
os.remove(F'{directory}/configuration.json' )
lowerCamelCase = """PyTorch""" in generate_tensorflow_pytorch_and_flax
lowerCamelCase = """TensorFlow""" in generate_tensorflow_pytorch_and_flax
lowerCamelCase = """Flax""" in generate_tensorflow_pytorch_and_flax
lowerCamelCase = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(A , exist_ok=A )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' , exist_ok=A )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' , """w""" ):
pass
shutil.move(
F'{directory}/__init__.py' , F'{model_dir}/__init__.py' , )
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' , F'{model_dir}/configuration_{lowercase_model_name}.py' , )
def remove_copy_lines(A ):
with open(A , """r""" ) as f:
lowerCamelCase = f.readlines()
with open(A , """w""" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(A )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' , F'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' , F'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' , F'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' , F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(A , A , A ):
# Create temp file
lowerCamelCase , lowerCamelCase = mkstemp()
lowerCamelCase = False
with fdopen(A , """w""" ) as new_file:
with open(A ) as old_file:
for line in old_file:
new_file.write(A )
if line_to_copy_below in line:
lowerCamelCase = True
for line_to_copy in lines_to_copy:
new_file.write(A )
if not line_found:
raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(A , A )
# Remove original file
remove(A )
# Move new file
move(A , A )
def skip_units(A ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(A ):
with open(A ) as datafile:
lowerCamelCase = []
lowerCamelCase = False
lowerCamelCase = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
lowerCamelCase = line.split("""\"""" )[1]
lowerCamelCase = skip_units(A )
elif "# Below: " in line and "##" not in line:
lowerCamelCase = line.split("""\"""" )[1]
lowerCamelCase = skip_units(A )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(A , A , A )
lowerCamelCase = []
elif "# Replace with" in line and "##" not in line:
lowerCamelCase = []
elif "##" not in line:
lines_to_copy.append(A )
remove(A )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(A )
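# The to_replace_<model>.py files consumed by replace_in_files() follow a simple
# marker protocol (illustrative snippet; paths and content are assumed):
#
#   # To replace in: "src/transformers/__init__.py"
#   # Below: "# Models"
#   "brand_new_model": ["BrandNewModelModel"],
#   # End.
#
# Everything between a "# Below:"/"# Replace with" marker and "# End." is copied
# into the quoted target file directly under the quoted anchor line, unless
# skip_units() decides the framework (PyTorch/TensorFlow/Flax) was not selected.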
| 457
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __lowercase ( a_ , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
def __A ( self ) -> List[Any]:
'''simple docstring'''
pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )
lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
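    # Note on the expected tokens above: SPIECE_UNDERLINE ("▁") is SentencePiece's
    # word-boundary marker, and pieces missing from the 1000-entry test vocab
    # (like "9" or "é") map to id 0, which round-trips back as "<unk>".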
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
| 457
| 1
|
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
@slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
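# For reference, DistilBERT reuses BERT's special-token layout, which is exactly
# what the two asserts above spell out:
#   single: [CLS] sequence builders [SEP]
#   pair:   [CLS] sequence builders [SEP] multi-sequence build [SEP]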
| 653
|
'''simple docstring'''
def solution() -> int:
    """
    Return the product d_1 * d_10 * d_100 * ... * d_1000000, where d_n is the
    n-th digit of the Champernowne constant 0.123456789101112...
    """
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
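# An O(1)-memory alternative sketch (illustrative, not part of the original
# solution): the n-th digit of Champernowne's constant can be located
# arithmetically instead of materializing a million-character string, by first
# finding which block of k-digit numbers the index falls into.
def champernowne_digit(n: int) -> int:
    """Return the n-th digit (1-indexed) of 0.123456789101112..."""
    digits = 1   # digit-length of numbers in the current block
    count = 9    # how many numbers the block contains
    start = 1    # first number in the block
    while n > digits * count:
        n -= digits * count
        digits += 1
        count *= 10
        start *= 10
    number = start + (n - 1) // digits
    return int(str(number)[(n - 1) % digits])

# e.g. champernowne_digit(12) == 1, the same digit solution() reads via the
# joined string.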
| 653
| 1
|
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
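# A minimal inference sketch mirroring test_output_pretrained above (the model
# id comes from the test; .sample is the decoded reconstruction after vector
# quantization of the latents):
#
#   model = VQModel.from_pretrained("fusing/vqgan-dummy").eval()
#   x = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
#   with torch.no_grad():
#       recon = model(x).sample  # same shape as x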
| 185
|
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    """
    Return True if ``number`` is automorphic, i.e. its square ends in the
    number itself (e.g. 76**2 == 5776).

    >>> is_automorphic_number(25)
    True
    >>> is_automorphic_number(7)
    False
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
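# A brief illustrative check (not in the original file): the automorphic numbers
# below 100 are exactly 0, 1, 5, 6, 25, 76.
def _list_automorphic(limit: int = 100) -> list:
    return [n for n in range(limit) if is_automorphic_number(n)]

# _list_automorphic() == [0, 1, 5, 6, 25, 76]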
| 116
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
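# Rough sketch of the mechanism (an assumption, simplified from the real class):
# _LazyModule replaces this module in sys.modules with an object whose
# __getattr__ imports the submodule listed in _import_structure on first access,
# so `import transformers.onnx` stays cheap until e.g. OnnxConfig is touched.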
| 14
|
"""simple docstring"""
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0_2_1_7_6_6_3_4e-1_9,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.355_818,
}
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
UpperCAmelCase_ = (
f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
f"""Valid values are: {', '.join(lowerCAmelCase__ )}"""
)
raise ValueError(lowerCAmelCase__ )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
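
# Minimal usage sketch (assumed example): 1_000 joules is exactly 1 kilojoule,
# since every unit in the table is expressed relative to joules.
#
#     >>> energy_conversion("joule", "kilojoule", 1_000)
#     1.0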
if __name__ == "__main__":
    import doctest

    doctest.testmod()
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler from Karras et al. (2022) for variance-expanding models."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        # Increase the noise level of the sample ("churn") when sigma is inside [s_min, s_max].
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        # First-order (Euler) step from sigma_hat to sigma_prev.
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        # Second-order correction: average the Euler derivative with the corrected one.
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
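
# Minimal sampling-loop sketch for this scheduler (assumed usage; `model` stands
# in for a trained denoiser and is not defined here): churn the sample up to
# sigma_hat, take an Euler step, and the result is the sample at sigma_prev.
#
#     scheduler = KarrasVeScheduler()
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for i in range(len(scheduler.schedule)):
#         sigma = scheduler.schedule[i]
#         sigma_prev = scheduler.schedule[i + 1] if i + 1 < len(scheduler.schedule) else 0
#         sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#         model_output = model(sample_hat, sigma_hat)  # assumed denoiser call
#         sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample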
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDMaDPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS


enable_full_determinism()


class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDMaDPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDMaDPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice_1 = rgb[0, -3:, -3:, -1]
        depth_slice_1 = depth[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice_2 = rgb[0, -3:, -3:, -1]
        depth_slice_2 = depth[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDMaDPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])

        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3


@nightly
@require_torch_gpu
class StableDiffusionPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_4c(self):
        ldm3d_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
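
# Minimal inference sketch for the pipeline under test (assumed usage): unlike a
# plain text-to-image pipeline, LDM3D returns an RGB image and a depth map.
#
#     pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
#     out = pipe("a photograph of an astronaut riding a horse", output_type="numpy")
#     rgb, depth = out.rgb, out.depth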
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def snake_case ( A__ ,A__ ):
assert isinstance(A__ ,A__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : int = tmp_path / "cache"
UpperCAmelCase_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ : Tuple = JsonDatasetReader(A__ ,cache_dir=A__ ,keep_in_memory=A__ ).read()
_check_json_dataset(A__ ,A__ )
@pytest.mark.parametrize(
"features" ,[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] ,)
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Union[str, Any] = tmp_path / "cache"
UpperCAmelCase_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : int = features.copy() if features else default_expected_features
UpperCAmelCase_ : Dict = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : List[str] = JsonDatasetReader(A__ ,features=A__ ,cache_dir=A__ ).read()
_check_json_dataset(A__ ,A__ )
@pytest.mark.parametrize(
"features" ,[
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] ,)
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : str = tmp_path / "cache"
UpperCAmelCase_ : List[Any] = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
UpperCAmelCase_ : List[Any] = features.copy() if features else default_expected_features
UpperCAmelCase_ : List[str] = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : Optional[int] = JsonDatasetReader(A__ ,features=A__ ,cache_dir=A__ ).read()
assert isinstance(A__ ,A__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def snake_case ( A__ ,A__ ):
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
UpperCAmelCase_ : Optional[int] = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
UpperCAmelCase_ : Tuple = features.copy()
UpperCAmelCase_ : Any = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : Dict = tmp_path / "cache"
UpperCAmelCase_ : Tuple = JsonDatasetReader(A__ ,features=A__ ,cache_dir=A__ ).read()
assert isinstance(A__ ,A__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" ,[None, NamedSplit("train" ), "train", "test"] )
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : List[str] = tmp_path / "cache"
UpperCAmelCase_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : Any = JsonDatasetReader(A__ ,cache_dir=A__ ,split=A__ ).read()
_check_json_dataset(A__ ,A__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" ,[str, list] )
def snake_case ( A__ ,A__ ,A__ ):
if issubclass(A__ ,A__ ):
UpperCAmelCase_ : Tuple = jsonl_path
elif issubclass(A__ ,A__ ):
UpperCAmelCase_ : int = [jsonl_path]
UpperCAmelCase_ : Dict = tmp_path / "cache"
UpperCAmelCase_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : List[str] = JsonDatasetReader(A__ ,cache_dir=A__ ).read()
_check_json_dataset(A__ ,A__ )
def snake_case ( A__ ,A__ ,A__=("train",) ):
assert isinstance(A__ ,A__ )
for split in splits:
UpperCAmelCase_ : Union[str, Any] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" ,[False, True] )
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : List[str] = tmp_path / "cache"
UpperCAmelCase_ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ : List[Any] = JsonDatasetReader({"train": jsonl_path} ,cache_dir=A__ ,keep_in_memory=A__ ).read()
_check_json_datasetdict(A__ ,A__ )
@pytest.mark.parametrize(
"features" ,[
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] ,)
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Dict = tmp_path / "cache"
UpperCAmelCase_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : str = features.copy() if features else default_expected_features
UpperCAmelCase_ : Any = (
Features({feature: Value(A__ ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : Optional[int] = JsonDatasetReader({"train": jsonl_path} ,features=A__ ,cache_dir=A__ ).read()
_check_json_datasetdict(A__ ,A__ )
@pytest.mark.parametrize("split" ,[None, NamedSplit("train" ), "train", "test"] )
def snake_case ( A__ ,A__ ,A__ ):
if split:
UpperCAmelCase_ : Tuple = {split: jsonl_path}
else:
UpperCAmelCase_ : List[Any] = "train"
UpperCAmelCase_ : str = {"train": jsonl_path, "test": jsonl_path}
UpperCAmelCase_ : str = tmp_path / "cache"
UpperCAmelCase_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : Dict = JsonDatasetReader(A__ ,cache_dir=A__ ).read()
_check_json_datasetdict(A__ ,A__ ,splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def snake_case ( A__ ):
return json.load(A__ )
def snake_case ( A__ ):
return [json.loads(A__ ) for line in buffer]
class UpperCamelCase_ :
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] ) -> str:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase_ , lowerCAmelCase_ , lines=lowerCAmelCase_ ).write()
buffer.seek(0 )
UpperCAmelCase_ : Union[str, Any] = load_json_function(lowerCAmelCase_ )
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
assert isinstance(exported_content[0] , lowerCAmelCase_ )
assert len(lowerCAmelCase_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] ) -> List[str]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase_ , lowerCAmelCase_ , lines=lowerCAmelCase_ , orient=lowerCAmelCase_ ).write()
buffer.seek(0 )
UpperCAmelCase_ : Dict = load_json(lowerCAmelCase_ )
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCAmelCase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowerCAmelCase_ ) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase_ , lowerCAmelCase_ , lines=lowerCAmelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCAmelCase_ : str = load_json_function(lowerCAmelCase_ )
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
assert isinstance(exported_content[0] , lowerCAmelCase_ )
assert len(lowerCAmelCase_ ) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase_ , lowerCAmelCase_ , lines=lowerCAmelCase_ , orient=lowerCAmelCase_ , num_proc=2 ).write()
buffer.seek(0 )
UpperCAmelCase_ : Union[str, Any] = load_json(lowerCAmelCase_ )
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(lowerCAmelCase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(lowerCAmelCase_ ) == 10
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Optional[Any] ) -> List[Any]:
with pytest.raises(lowerCAmelCase_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(lowerCAmelCase_ , lowerCAmelCase_ , num_proc=0 )
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp("data" ) / f"""test.json.{extension}"""
UpperCAmelCase_ : Dict = str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(lowerCAmelCase_ , lowerCAmelCase_ , compression=lowerCAmelCase_ ).write()
with fsspec.open(lowerCAmelCase_ , "rb" , compression="infer" ) as f:
UpperCAmelCase_ : Optional[Any] = f.read()
with fsspec.open(lowerCAmelCase_ , "rb" , compression="infer" ) as f:
UpperCAmelCase_ : Optional[int] = f.read()
assert exported_content == original_content
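
# Round-trip sketch of the APIs under test (assumed minimal usage, independent
# of the pytest fixtures used above):
#
#     ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
#     JsonDatasetWriter(ds, "out.jsonl", lines=True).write()
#     reloaded = JsonDatasetReader("out.jsonl").read()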
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
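
# Example invocation (script name and paths are illustrative placeholders):
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./pytorch_model.bin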
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Apply the black code formatter to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrites the copies when `overwrite=True`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
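
# Minimal composition sketch (assumed usage, mirroring other composite configs):
#
#     text_cfg = Pix2StructTextConfig()
#     vision_cfg = Pix2StructVisionConfig()
#     config = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
#     config.to_dict()  # nests both sub-configs under "text_config"/"vision_config"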
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ : Optional[Any] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Tuple = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Tuple = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Tuple = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    # Each edge is replaced by four shorter edges, so the vector count roughly
    # quadruples per iteration.
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
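
# Quick sanity check for the rotation helper (illustrative): rotating the unit
# x-vector by 90 degrees should give (approximately) the unit y-vector.
#
#     >>> numpy.allclose(rotate(numpy.array([1, 0]), 90), numpy.array([0, 1]))
#     True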
def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How much images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
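
# Example invocation (the script name and model path are illustrative placeholders):
#
#   python text2images.py -m ./stable-diffusion-model-dir -c "robotic cat with wings" -n 4 -s 42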
"""simple docstring"""
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
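    # A minimal sketch of the expected input shape and the markdown it yields,
    # assuming hypothetical file names; each benchmark maps metric names to
    # new/old/diff values:
    #
    #   {"benchmarks/text_clf": {"accuracy": {"new": 0.91, "old": 0.89, "diff": 0.02}}}
    #
    # format_json_to_md renders that as a collapsible <details> block containing:
    #
    #   ### Benchmark: text_clf
    #   | metric | accuracy |
    #   |--------|---|
    #   | new / old (diff) | 0.910000 / 0.890000 (0.020000) |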
| 155
|
"""simple docstring"""
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
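    # The conversions above rely on both state dicts enumerating their parameters
    # in the same order, so zip() can pair old and new key names. A minimal sketch
    # of that renaming pattern with toy dicts:
    _old = {"blocks.0.weight": 1, "blocks.0.bias": 2}
    _new_keys = ["down_blocks.0.weight", "down_blocks.0.bias"]
    _mapping = dict(zip(_old.keys(), _new_keys))
    _renamed = {_mapping[k]: v for k, v in _old.items()}
    assert _renamed == {"down_blocks.0.weight": 1, "down_blocks.0.bias": 2}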
| 155
| 1
|
'''simple docstring'''
def solution() -> int:
    """Count the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
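    # An independent cross-check with the standard library: Python's calendar
    # agrees that 171 first-of-the-month Sundays fall in 1901-2000.
    import datetime

    assert sum(
        datetime.date(year, month, 1).weekday() == 6  # Monday == 0, Sunday == 6
        for year in range(1901, 2001)
        for month in range(1, 13)
    ) == solution() == 171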
| 700
|
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose the rows of source_data into per-attribute columns."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalise each attribute; a weight of 0 inverts the score (lower is better)."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-attribute scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Attach a combined score to every row of source_data and return it."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
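# A minimal usage sketch: three vehicles scored on (price, fuel cost, mileage).
# Weight 0 marks "lower is better" columns, weight 1 marks "higher is better".
vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
scored = procentual_proximity(vehicles, [0, 0, 1])
best = max(scored, key=lambda row: row[-1])  # the combined score was appended last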
| 7
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
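# A minimal usage sketch, assuming the classes above: the defaults reproduce
# ALBERT's published dimensions, and any of them can be overridden per model size.
config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
assert config.model_type == "albert"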
| 295
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list containing one random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
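    # The suite follows the standard unittest protocol, so it can be run directly,
    # assuming torch, transformers, and Pillow are installed (the file path below
    # is illustrative):
    #   python -m unittest path/to/test_processor_vision_text_dual_encoder.py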
| 295
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
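# A minimal usage sketch, assuming the config class above; unset arguments keep
# the roc-bert-base-zh defaults.
config = RoCBertConfig(num_hidden_layers=4, num_attention_heads=4, hidden_size=256)
assert config.model_type == "roc_bert" and config.enable_shape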
| 84
|
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : Dict = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
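# A worked example of the rounding above, assuming the default scale factor of 8:
# the returned values are the latent height/width (pixel size divided by the 8x
# VAE downscale), rounded up so the latent grid fully covers the requested image.
assert downscale_height_and_width(512, 512) == (64, 64)   # 512 is divisible by 8**2
assert downscale_height_and_width(520, 520) == (72, 72)   # 520 is not, so round up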
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
def lowerCAmelCase( self : Dict , UpperCAmelCase__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
snake_case : Union[str, Any] = torch.device(F"cuda:{gpu_id}" )
snake_case : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCAmelCase__ , UpperCAmelCase__ )
def lowerCAmelCase( self : List[Any] , UpperCAmelCase__ : Any=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
snake_case : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=UpperCAmelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
snake_case : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
snake_case , snake_case : Optional[int] = cpu_offload_with_hook(UpperCAmelCase__ , UpperCAmelCase__ , prev_module_hook=UpperCAmelCase__ )
# We'll offload the last model manually.
snake_case : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase( self : Union[str, Any] ):
"""simple docstring"""
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCAmelCase__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCAmelCase__ )
def __call__( self : List[str] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 512 , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 4.0 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , ):
"""simple docstring"""
snake_case : Optional[int] = self._execution_device
snake_case : Union[str, Any] = guidance_scale > 1.0
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : Any = torch.cat(UpperCAmelCase__ , dim=0 )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : Union[str, Any] = torch.cat(UpperCAmelCase__ , dim=0 )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
snake_case : int = torch.cat(UpperCAmelCase__ , dim=0 )
snake_case : List[Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
snake_case : Dict = image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
snake_case : Optional[Any] = negative_image_embeds.repeat_interleave(UpperCAmelCase__ , dim=0 )
snake_case : Tuple = hint.repeat_interleave(UpperCAmelCase__ , dim=0 )
snake_case : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
snake_case : List[str] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCAmelCase__ )
self.scheduler.set_timesteps(UpperCAmelCase__ , device=UpperCAmelCase__ )
snake_case : str = self.scheduler.timesteps
snake_case : Optional[Any] = self.movq.config.latent_channels
snake_case , snake_case : Optional[Any] = downscale_height_and_width(UpperCAmelCase__ , UpperCAmelCase__ , self.movq_scale_factor )
# create initial latent
snake_case : Dict = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
snake_case : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case : Optional[int] = {'''image_embeds''': image_embeds, '''hint''': hint}
snake_case : Any = self.unet(
sample=UpperCAmelCase__ , timestep=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , added_cond_kwargs=UpperCAmelCase__ , return_dict=UpperCAmelCase__ , )[0]
if do_classifier_free_guidance:
snake_case , snake_case : Dict = noise_pred.split(latents.shape[1] , dim=1 )
snake_case , snake_case : Any = noise_pred.chunk(2 )
snake_case , snake_case : Dict = variance_pred.chunk(2 )
snake_case : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
snake_case : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
snake_case , snake_case : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
snake_case : List[Any] = self.scheduler.step(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , generator=UpperCAmelCase__ , )[0]
# post-processing
snake_case : List[Any] = self.movq.decode(UpperCAmelCase__ , force_not_quantize=UpperCAmelCase__ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
snake_case : Optional[Any] = image * 0.5 + 0.5
snake_case : int = image.clamp(0 , 1 )
snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case : str = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
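    # The classifier-free guidance step in __call__ follows the usual linear
    # combination:
    #   eps = eps_uncond + guidance_scale * (eps_text - eps_uncond)
    # guidance_scale == 1.0 recovers the unguided prediction; larger values push
    # the sample toward the image/hint conditioning at the cost of diversity.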
| 84
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 535
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314_459_8  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Root-mean-square speed of a gas molecule, v_rms = sqrt(3 * R * T / M),
    with the temperature in kelvin and the molar mass in kg/mol."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example: the molar mass of N2 is 0.028 kg/mol
    temperature = 300
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 535
| 1
|
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """UnCLIP does not rescale model inputs; returns the sample unchanged."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
'''simple docstring'''
if prev_timestep is None:
__snake_case : List[str] = t - 1
__snake_case : Dict = self.alphas_cumprod[t]
__snake_case : Dict = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
__snake_case : int = 1 - alpha_prod_t
__snake_case : int = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
__snake_case : Any = self.betas[t]
else:
__snake_case : Union[str, Any] = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__snake_case : Optional[Any] = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
__snake_case : List[str] = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
__snake_case : List[str] = torch.log(torch.clamp(__a , min=1e-20 ) )
__snake_case : Optional[Any] = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
__snake_case : List[str] = variance.log()
__snake_case : Optional[Any] = beta.log()
__snake_case : Optional[int] = (predicted_variance + 1) / 2
__snake_case : Dict = frac * max_log + (1 - frac) * min_log
return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
'''simple docstring'''
__snake_case : Optional[Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
__snake_case : Tuple = torch.split(__a , sample.shape[1] , dim=1 )
else:
__snake_case : Union[str, Any] = None
# 1. compute alphas, betas
if prev_timestep is None:
__snake_case : List[Any] = t - 1
__snake_case : Tuple = self.alphas_cumprod[t]
__snake_case : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
__snake_case : Union[str, Any] = 1 - alpha_prod_t
__snake_case : List[Any] = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
__snake_case : List[str] = self.betas[t]
__snake_case : Optional[int] = self.alphas[t]
else:
__snake_case : int = 1 - alpha_prod_t / alpha_prod_t_prev
__snake_case : List[str] = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__snake_case : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__snake_case : Dict = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
' for the UnCLIPScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__snake_case : Any = torch.clamp(
__a , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__snake_case : str = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
__snake_case : Optional[Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__snake_case : Dict = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__snake_case : Optional[int] = 0
if t > 0:
__snake_case : Optional[int] = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=__a , device=model_output.device )
__snake_case : Optional[Any] = self._get_variance(
__a , predicted_variance=__a , prev_timestep=__a , )
if self.variance_type == "fixed_small_log":
__snake_case : Any = variance
elif self.variance_type == "learned_range":
__snake_case : Tuple = (0.5 * variance).exp()
else:
raise ValueError(
f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
' for the UnCLIPScheduler.' )
__snake_case : Union[str, Any] = variance * variance_noise
__snake_case : Optional[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=__a , pred_original_sample=__a )
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps share the samples' device and dtype
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
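# A minimal sketch exercising the cosine schedule defined above: every beta is
# positive and clipped at max_beta = 0.999.
_betas = betas_for_alpha_bar(1000)
assert _betas.shape == (1000,) and bool((_betas > 0).all()) and float(_betas.max()) <= 0.999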
| 706
|
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
'''simple docstring'''
# fmt: off
__snake_case : Union[str, Any] = {'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , )
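    # A fast sanity check against the local SentencePiece fixture, assuming the
    # class above (the slow tests exercise the real checkpoint instead):
    #   tok = XLMProphetNetTokenizer(SAMPLE_VOCAB)
    #   assert tok.tokenize("This is a test") == ["▁This", "▁is", "▁a", "▁t", "est"]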
| 124
| 0
|
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
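    # A minimal sketch of what the checkpoint regex matches:
    #   _re_checkpoint.findall("See [bert-base-uncased](https://huggingface.co/bert-base-uncased).")
    #   -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]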
| 89
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"""--file_name""",
required=False,
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""",
required=False,
type=str,
default="""./resources/ltp""",
help="""resources for LTP tokenizer, usually a path""",
)
parser.add_argument(
"""--bert""",
required=False,
type=str,
default="""./resources/robert""",
help="""resources for Bert tokenizer""",
)
parser.add_argument(
"""--save_path""",
required=False,
type=str,
default="""./resources/ref.txt""",
help="""path to save res""",
)
    args = parser.parse_args()
main(args)
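    # A small illustration of add_sub_symbol, assuming the functions above: when
    # LTP segments "你好" and "世界" as whole words, the trailing BERT sub-tokens
    # of each word receive the "##" prefix that whole-word masking keys on.
    assert add_sub_symbol(["你", "好", "世", "界"], {"你好", "世界"}) == ["你", "##好", "世", "##界"]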
| 377
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map a label or a list of labels to the corresponding ImageNet ids."""
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__( self , class_labels: List[int] , guidance_scale: float = 4.0 , generator: Optional[torch.Generator] = None , num_inference_steps: int = 50 , output_type: Optional[str] = "pil" , return_dict: bool = True , )-> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        batch_size = len(class_labels )
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=generator , device=self.device , dtype=self.transformer.dtype , )
        latent_model_input = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels , device=self.device ).reshape(-1 )
        class_null = torch.tensor([1000] * batch_size , device=self.device )
        class_labels_input = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input ) // 2]
                latent_model_input = torch.cat([half, half] , dim=0 )
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            timesteps = t
            if not torch.is_tensor(timesteps ):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps , float ):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps] , dtype=dtype , device=latent_model_input.device )
            elif len(timesteps.shape ) == 0:
                timesteps = timesteps[None].to(latent_model_input.device )
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0] )
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input , timestep=timesteps , class_labels=class_labels_input ).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps , len(eps ) // 2 , dim=0 )
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps] , dim=0 )
                noise_pred = torch.cat([eps, rest] , dim=1 )
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred , latent_channels , dim=1 )
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output , t , latent_model_input ).prev_sample
        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2 , dim=0 )
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents ).sample
        samples = (samples / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples )
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples )
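# A minimal usage sketch of the pipeline above (the checkpoint name is
# illustrative, not part of this file):
#
#     from diffusers import DiTPipeline
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#     class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#     images = pipe(class_labels=class_ids, num_inference_steps=25).images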
| 115
| 0
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer ):
    """simple docstring"""

    def __init__( self , replacement: str = "▁" , add_prefix_space: bool = True , unk_token: Union[str, AddedToken] = "<unk>" , eos_token: Union[str, AddedToken] = "</s>" , pad_token: Union[str, AddedToken] = "<pad>" , )-> None:
        """simple docstring"""
        self.special_tokens = {
            """pad""": {"""id""": 0, """token""": pad_token},
            """eos""": {"""id""": 1, """token""": eos_token},
            """unk""": {"""id""": 2, """token""": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["""id"""]] = token_dict["""token"""]
        tokenizer = Tokenizer(Unigram() )
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(""" {2,}""" ) , """ """ ),
                normalizers.Lowercase(),
            ] )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space ),
                pre_tokenizers.Digits(individual_digits=True ),
                pre_tokenizers.Punctuation(),
            ] )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space )
        tokenizer.post_processor = TemplateProcessing(
            single=f'''$A {self.special_tokens['eos']['token']}''' , special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] , )
        parameters = {
            """model""": """SentencePieceUnigram""",
            """replacement""": replacement,
            """add_prefix_space""": add_prefix_space,
        }
        super().__init__(tokenizer , parameters )
    def train( self , files: Union[str, List[str]] , vocab_size: int = 80_00 , show_progress: bool = True , )-> None:
        """simple docstring"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        if isinstance(files , str ):
            files = [files]
        self._tokenizer.train(files , trainer=trainer )
        self.add_unk_id()

    def train_from_iterator( self , iterator: Union[Iterator[str], Iterator[Iterator[str]]] , vocab_size: int = 80_00 , show_progress: bool = True , )-> None:
        """simple docstring"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        self._tokenizer.train_from_iterator(iterator , trainer=trainer )
        self.add_unk_id()
    def add_unk_id( self )-> None:
        """simple docstring"""
        tokenizer_json = json.loads(self._tokenizer.to_str() )
        tokenizer_json["model"]["unk_id"] = self.special_tokens["""unk"""]["""id"""]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json ) )
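# A minimal training sketch for the tokenizer above (the corpus path is
# illustrative, not part of this file):
#
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train(files="path/to/corpus.txt", vocab_size=8000)
#     tokenizer.save("tokenizer.json")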
| 470
|
"""simple docstring"""
lowercase_ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowercase_ = [{"type": "code", "content": INSTALL_CONTENT}]
lowercase_ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 470
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715
|
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N ) -> int:
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 515
| 0
|
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum ):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS )
class Text2TextGenerationPipeline(Pipeline ):
    return_name = 'generated'

    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _sanitize_parameters( self , return_tensors=None , return_type=None , clean_up_tokenization_spaces=None , truncation=None , stop_sequence=None , **generate_kwargs , ):
        '''simple docstring'''
        preprocess_params = {}
        if truncation is not None:
            preprocess_params['''truncation'''] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params['''return_type'''] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params['''clean_up_tokenization_spaces'''] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''' )
            generate_kwargs['''eos_token_id'''] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs( self , input_length: int , min_length: int , max_length: int ):
        '''simple docstring'''
        return True
    def _parse_and_tokenize( self , *args , truncation ):
        '''simple docstring'''
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ''''''
        if isinstance(args[0] , list ):
            if self.tokenizer.pad_token_id is None:
                raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0] , str ):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f""" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`""" )
        inputs = self.tokenizer(*args , padding=padding , truncation=truncation , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        result = super().__call__(*args , **kwargs )
        if (
            isinstance(args[0] , list )
            and all(isinstance(el , str ) for el in args[0] )
            and all(len(res ) == 1 for res in result )
        ):
            return [res[0] for res in result]
        return result
    def preprocess( self , inputs , truncation=TruncationStrategy.DO_NOT_TRUNCATE , **kwargs ):
        '''simple docstring'''
        inputs = self._parse_and_tokenize(inputs , truncation=truncation , **kwargs )
        return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
        '''simple docstring'''
        if self.framework == "pt":
            in_b, input_length = model_inputs['''input_ids'''].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs['''input_ids'''] ).numpy()
        generate_kwargs['''min_length'''] = generate_kwargs.get('''min_length''' , self.model.config.min_length )
        generate_kwargs['''max_length'''] = generate_kwargs.get('''max_length''' , self.model.config.max_length )
        self.check_inputs(input_length , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b , out_b // in_b , *output_ids.shape[1:] )
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids , (in_b, out_b // in_b, *output_ids.shape[1:]) )
        return {"output_ids": output_ids}
    def postprocess( self , model_outputs , return_type=ReturnType.TEXT , clean_up_tokenization_spaces=False ):
        '''simple docstring'''
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"""{self.return_name}_token_ids""": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"""{self.return_name}_text""": self.tokenizer.decode(
                        output_ids , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                }
            records.append(record )
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS )
class SummarizationPipeline(Text2TextGenerationPipeline ):
    return_name = 'summary'

    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        return super().__call__(*args , **kwargs )

    def check_inputs( self , input_length: int , min_length: int , max_length: int ):
        '''simple docstring'''
        if max_length < min_length:
            logger.warning(f"""Your min_length={min_length} must be smaller than your max_length={max_length}.""" )
        if input_length < max_length:
            logger.warning(
                f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
                '''a summarization task, where outputs shorter than the input are typically wanted, you might '''
                f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TranslationPipeline(Text2TextGenerationPipeline ):
    return_name = 'translation'

    def check_inputs( self , input_length: int , min_length: int , max_length: int ):
        '''simple docstring'''
        if input_length > 0.9 * max_length:
            logger.warning(
                f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
                '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
        return True

    def preprocess( self , *args , truncation=TruncationStrategy.DO_NOT_TRUNCATE , src_lang=None , tgt_lang=None ):
        '''simple docstring'''
        if getattr(self.tokenizer , '''_build_translation_inputs''' , None ):
            return self.tokenizer._build_translation_inputs(
                *args , return_tensors=self.framework , truncation=truncation , src_lang=src_lang , tgt_lang=tgt_lang )
        else:
            return super()._parse_and_tokenize(*args , truncation=truncation )

    def _sanitize_parameters( self , src_lang=None , tgt_lang=None , **kwargs ):
        '''simple docstring'''
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs )
        if src_lang is not None:
            preprocess_params['''src_lang'''] = src_lang
        if tgt_lang is not None:
            preprocess_params['''tgt_lang'''] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get('''task''' , self.task )
            items = task.split('''_''' )
            if task and len(items ) == 4:
                # translation, XX, to YY
                preprocess_params['''src_lang'''] = items[1]
                preprocess_params['''tgt_lang'''] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        return super().__call__(*args , **kwargs )
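# A minimal usage sketch of the pipelines defined above (model selection is
# left to the `pipeline` factory; outputs are illustrative):
#
#     from transformers import pipeline
#
#     summarizer = pipeline("summarization")
#     summarizer("An apple a day keeps the doctor away.", max_length=20, min_length=5)
#
#     translator = pipeline("translation_en_to_fr")
#     translator("How old are you?")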
| 75
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656
| 0
|
def solution(n: int = 1000 )-> int:
    a = 3
    result = 0
    while a < n:
        # every multiple of 15 is already a multiple of 3, so the single
        # `or` check below counts each qualifying number exactly once
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 608
|
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase ):
    """simple docstring"""

    @require_torch
    def test_small_model_pt(self ):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
        dataset = load_dataset("ashraq/esc50" )
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}] , )

    @unittest.skip("No models are available in TF" )
    def test_small_model_tf(self ):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self ):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50" )
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] , batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5 , )

    @unittest.skip("No models are available in TF" )
    def test_large_model_tf(self ):
        pass
| 608
| 1
|
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name: list , arrival_time: list , burst_time: list , no_of_process: int ) -> list:
    '''simple docstring'''
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time )]
    process_name = [process_name[i] for i in np.argsort(arrival_time )]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0 , no_of_process ):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def calculate_waiting_time(process_name: list , turn_around_time: list , burst_time: list , no_of_process: int ) -> list:
    '''simple docstring'''
    waiting_time = [0] * no_of_process
    for i in range(0 , no_of_process ):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["""A""", """B""", """C""", """D""", """E"""]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""")
for i in range(0, no_of_process):
print(
F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(F"""average waiting time : {mean(waiting_time):.5f}""")
print(F"""average turn around time : {mean(turn_around_time):.5f}""")
| 558
|
# flake8: noqa
# Lint as: python3
__all__ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 558
| 1
|
from ..utils import DummyObject, requires_backends
class _a ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["torch", "scipy"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ["torch", "scipy"] )
@classmethod
def _A ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def _A ( cls , *__UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["torch", "scipy"] )
| 207
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("""T""")
class GraphAdjacencyList(Generic[T] ):
    '''simple docstring'''

    def __init__( self , directed: bool = True ):
        """simple docstring"""
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge( self , source_vertex: T , destination_vertex: T ):
        """simple docstring"""
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self
def __repr__( self ):
"""simple docstring"""
return pformat(self.adj_list )
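# A minimal, self-contained usage sketch of the class above (vertex labels are
# illustrative):
def _example_usage() -> None:
    graph = GraphAdjacencyList(directed=False)
    graph.add_edge(1, 2).add_edge(2, 3)  # add_edge returns self, so calls chain
    assert graph.adj_list == {1: [2], 2: [1, 3], 3: [2]}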
| 207
| 1
|
def check_cycle(graph: dict ) -> bool:
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )


def depth_first_search(graph: dict , vertex: int , visited: set , rec_stk: set ) -> bool:
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
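# A minimal, self-contained sketch exercising the functions above (graph
# layouts are illustrative):
def _example_usage() -> None:
    acyclic = {0: [1, 2], 1: [2], 2: []}
    cyclic = {0: [1], 1: [2], 2: [0]}
    assert check_cycle(acyclic) is False
    assert check_cycle(cyclic) is True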
| 478
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'spm_file': 'sentencepiece.bpe.model',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
        ),
    },
    'spm_file': {
        'facebook/s2t-small-librispeech-asr': (
            'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    'facebook/s2t-small-librispeech-asr': 1024,
}

MUSTC_LANGS = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']

LANGUAGES = {'mustc': MUSTC_LANGS}


class Speech2TextTokenizer(PreTrainedTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file , spm_file , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , do_upper_case=False , do_lower_case=False , tgt_lang=None , lang_codes=None , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs)
        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [F"""<lang:{lang}>""" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""") for lang in self.langs}
            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.encoder)

    @property
    def tgt_lang( self ) -> str:
        """simple docstring"""
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang( self , new_tgt_lang ) -> None:
        """simple docstring"""
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens( self , tgt_lang: str ) -> None:
        """simple docstring"""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize( self , text: str ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text , out_type=str)

    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.encoder.get(token , self.encoder[self.unk_token])

    def _convert_id_to_token( self , index: int ) -> str:
        """simple docstring"""
        return self.decoder.get(index , self.unk_token)

    def convert_tokens_to_string( self , tokens: List[str] ) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def get_vocab( self ) -> Dict:
        """simple docstring"""
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__( self ) -> Dict:
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d: Dict ) -> None:
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs)

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )
        save_json(self.encoder , vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file , spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path , "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))


def load_spm( path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm


def load_json( path: str ) -> Union[Dict, List]:
    with open(path , '''r''' ) as f:
        return json.load(f )


def save_json( data , path: str ) -> None:
    with open(path , '''w''' ) as f:
        json.dump(data , f , indent=2 )
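# A minimal usage sketch (the checkpoint name is taken from the maps above):
#
#     tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#     ids = tokenizer("hello world").input_ids
#     print(tokenizer.decode(ids, skip_special_tokens=True))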
| 114
| 0
|
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ) -> None:
    """simple docstring"""
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights(fairseq_model , hf_model ) -> None:
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ) -> None:
    """simple docstring"""
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None ) -> None:
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint["""cfg"""] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint["""model"""] )
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
A : Tuple = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
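# Example invocation (paths and the script name are illustrative):
#
#     python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path ./WavLM-Base.pt \
#         --pytorch_dump_folder_path ./wavlm-base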
| 716
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class ConvNextV2Config(BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''

    model_type = '''convnextv2'''

    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-12 , drop_path_rate=0.0 , image_size=224 , out_features=None , out_indices=None , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
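# A minimal sketch of constructing the config and reading the aligned backbone
# fields (values illustrative):
#
#     config = ConvNextV2Config(depths=[2, 2, 6, 2], hidden_sizes=[40, 80, 160, 320])
#     print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']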
| 247
| 0
|
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast ):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ) -> None:
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F"""<extra_id_{i}>""" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens" )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        '''simple docstring'''
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    F""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    F""" {pretrained_model_name_or_path} automatically truncating your input to"""
                    F""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
                    F""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value." , FutureWarning , )
        return max_model_length

    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(F"""Copy vocab file to {out_vocab_file}""" )
        return (out_vocab_file,)

    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]

    def get_sentinel_tokens( self ) -> List[str]:
        '''simple docstring'''
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>" , token ) ) is not None , self.additional_special_tokens ) ) )

    def get_sentinel_token_ids( self ) -> List[int]:
        '''simple docstring'''
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
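# A minimal sketch of the sentinel helpers above (checkpoint name illustrative):
#
#     tok = T5TokenizerFast.from_pretrained("t5-small")
#     print(tok.get_sentinel_tokens()[:3])
#     print(tok.get_sentinel_token_ids()[:3])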
| 82
|
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    """simple docstring"""

    def __init__( self , short_edge_length , max_size=sys.maxsize ) -> None:
        """simple docstring"""
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__( self , imgs ):
        """simple docstring"""
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
            if size == 0:
                return img
            scale = size * 1.0 / min(h , w )
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh , neww ) > self.max_size:
                scale = self.max_size * 1.0 / max(newh , neww )
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5 )
            newh = int(newh + 0.5 )
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img )
                pil_image = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
                img = np.asarray(pil_image )
            else:
                img = img.permute(2 , 0 , 1 ).unsqueeze(0 )  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img , (newh, neww) , mode=self.interp_method , align_corners=False ).squeeze(0 )
            img_augs.append(img )
        return img_augs
class Preprocess:
    """simple docstring"""

    def __init__( self , cfg ) -> None:
        """simple docstring"""
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad( self , images ):
        """simple docstring"""
        max_size = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images )
        ]
        return torch.stack(images ), torch.tensor(image_sizes )

    def __call__( self , images , single_image=False ):
        """simple docstring"""
        with torch.no_grad():
            if not isinstance(images , list ):
                images = [images]
            if single_image:
                assert len(images ) == 1
            for i in range(len(images ) ):
                if isinstance(images[i] , torch.Tensor ):
                    images.insert(i , images.pop(i ).to(self.device ).float() )
                elif not isinstance(images[i] , torch.Tensor ):
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i ) , input_format=self.input_format ) )
                        .to(self.device )
                        .float() , )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images] )
            images = self.aug(images )
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x ) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images )
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes , sizes )
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box( boxes , scale_yx ):
    """simple docstring"""
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box( tensor , box_size: Tuple[int, int] ):
    """simple docstring"""
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h )
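# Sketch of the intended flow (the cfg object and image path are illustrative):
#
#     preprocess = Preprocess(cfg)
#     images, sizes, scales_yx = preprocess("input.jpg", single_image=True)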
| 268
| 0
|
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
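# Note (illustrative, not from the original test file): the slice comparison above is
# the usual regression-test pattern -- check a handful of output values against
# hard-coded references with a loose absolute tolerance rather than storing the whole
# tensor. A standalone analogue:
_out = torch.tensor([1.0001, 2.0, 3.0])
_ref = torch.tensor([1.0, 2.0, 3.0])
assert torch.allclose(_out, _ref, atol=1e-3)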
| 273
|
'''simple docstring'''
INSTALL_CONTENT = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 273
| 1
|
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers, dest_layers, layers_to_copy):
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
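# Quick illustration (sanity checks added for clarity) of how these tables are read:
# a 12-layer teacher distilled to a 3-layer student copies teacher layers 0, 6 and 11,
# and those student layers are supervised by teacher layers 3, 7 and 11.
assert LAYERS_TO_COPY[12][3] == [0, 6, 11]
assert LAYERS_TO_SUPERVISE[12][3] == [3, 7, 11]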
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher):
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher,
    save_path="student",
    e=None,
    d=None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
):
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
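    # Illustrative shell usage via `fire` (the model name and layer counts below are
    # example values, not taken from this file):
    #   python make_student.py t5-small --save_path student_t5 --e 4 --d 2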
| 363
|
"""simple docstring"""
import heapq
def greedy_min_vertex_cover(graph: dict) -> set:
    """
    Greedy APX-algorithm for minimum vertex cover.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue = []

    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1

        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
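    # Verification sketch (added for illustration): a set is a vertex cover iff every
    # edge has at least one endpoint in it. The greedy routine mutates its input, so
    # the check runs on a fresh copy of the graph.
    fresh = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    cover = greedy_min_vertex_cover({k: list(v) for k, v in fresh.items()})
    assert all(u in cover or v in cover for u in fresh for v in fresh[u])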
| 277
| 0
|
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
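# Minimal illustration (not part of the original test file): `list_field` wraps
# `dataclasses.field` with a default_factory, which is what makes a mutable default
# legal on a dataclass field.
@dataclass
class _ListFieldDemo:
    xs: List[int] = list_field(default=[1, 2])


assert _ListFieldDemo().xs == [1, 2]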
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class snake_case :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int =42
SCREAMING_SNAKE_CASE_ : str =field(default="toto" , metadata={"help": "help message"} )
@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class snake_case :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : BasicEnum ="toto"
def _lowerCamelCase ( self : Optional[Any] ):
__A = BasicEnum(self.foo )
@dataclass
class snake_case :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : MixedTypeEnum ="toto"
def _lowerCamelCase ( self : List[Any] ):
__A = MixedTypeEnum(self.foo )
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class snake_case :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[int] =list_field(default=[] )
SCREAMING_SNAKE_CASE_ : List[int] =list_field(default=[1, 2, 3] )
SCREAMING_SNAKE_CASE_ : List[str] =list_field(default=["Hallo", "Bonjour", "Hello"] )
SCREAMING_SNAKE_CASE_ : List[float] =list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class snake_case :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[int] =field()
SCREAMING_SNAKE_CASE_ : str =field()
SCREAMING_SNAKE_CASE_ : BasicEnum =field()
def _lowerCamelCase ( self : Any ):
__A = BasicEnum(self.required_enum )
@dataclass
class snake_case :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int
SCREAMING_SNAKE_CASE_ : "BasicEnum" =field()
SCREAMING_SNAKE_CASE_ : "Optional[bool]" =None
SCREAMING_SNAKE_CASE_ : "str" =field(default="toto" , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE_ : "List[str]" =list_field(default=["Hallo", "Bonjour", "Hello"] )
if is_python_no_less_than_3_10:
@dataclass
class snake_case :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : bool =False
SCREAMING_SNAKE_CASE_ : bool =True
SCREAMING_SNAKE_CASE_ : bool | None =None
@dataclass
class snake_case :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int | None =None
SCREAMING_SNAKE_CASE_ : float | None =field(default=__lowerCamelCase , metadata={"help": "help message"} )
SCREAMING_SNAKE_CASE_ : str | None =None
SCREAMING_SNAKE_CASE_ : list[str] | None =list_field(default=[] )
SCREAMING_SNAKE_CASE_ : list[int] | None =list_field(default=[] )
class snake_case ( unittest.TestCase ):
"""simple docstring"""
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """Small helper to check pseudo-equality of parsed arguments on two ArgumentParser instances."""
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}
            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]
            self.assertEqual(xx, yy)
def _lowerCamelCase ( self : Optional[Any] ):
__A = HfArgumentParser(__A )
__A = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__A , required=__A )
expected.add_argument('--bar' , type=__A , required=__A )
expected.add_argument('--baz' , type=__A , required=__A )
expected.add_argument('--flag' , type=__A , default=__A , const=__A , nargs='?' )
self.argparsersEqual(__A , __A )
__A = ['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((__A ) , ) = parser.parse_args_into_dataclasses(__A , look_for_args_file=__A )
self.assertFalse(example.flag )
def _lowerCamelCase ( self : Any ):
__A = HfArgumentParser(__A )
__A = argparse.ArgumentParser()
expected.add_argument('--foo' , default=4_2 , type=__A )
expected.add_argument('--baz' , default='toto' , type=__A , help='help message' )
self.argparsersEqual(__A , __A )
def _lowerCamelCase ( self : Tuple ):
__A = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__A , default=__A , const=__A , nargs='?' )
expected.add_argument('--baz' , type=__A , default=__A , const=__A , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=__A , dest='baz' )
expected.add_argument('--opt' , type=__A , default=__A )
__A = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__A )
for dataclass_type in dataclass_types:
__A = HfArgumentParser(__A )
self.argparsersEqual(__A , __A )
__A = parser.parse_args([] )
self.assertEqual(__A , Namespace(foo=__A , baz=__A , opt=__A ) )
__A = parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(__A , Namespace(foo=__A , baz=__A , opt=__A ) )
__A = parser.parse_args(['--foo', '--baz'] )
self.assertEqual(__A , Namespace(foo=__A , baz=__A , opt=__A ) )
__A = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(__A , Namespace(foo=__A , baz=__A , opt=__A ) )
__A = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(__A , Namespace(foo=__A , baz=__A , opt=__A ) )
def _lowerCamelCase ( self : Optional[Any] ):
__A = HfArgumentParser(__A )
__A = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 4_2] , type=make_choice_type_function(['titi', 'toto', 4_2] ) , )
self.argparsersEqual(__A , __A )
__A = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
__A = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__A = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
__A = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__A = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 4_2 )
__A = parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _lowerCamelCase ( self : int ):
@dataclass
class snake_case :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Literal["titi", "toto", 42] ="toto"
__A = HfArgumentParser(__A )
__A = argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 4_2) , type=make_choice_type_function(['titi', 'toto', 4_2] ) , )
self.argparsersEqual(__A , __A )
__A = parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
__A = parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
__A = parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 4_2 )
def _lowerCamelCase ( self : List[Any] ):
__A = HfArgumentParser(__A )
__A = argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=__A )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=__A )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__A )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=__A )
self.argparsersEqual(__A , __A )
__A = parser.parse_args([] )
self.assertEqual(
__A , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
__A = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(__A , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def _lowerCamelCase ( self : List[Any] ):
__A = argparse.ArgumentParser()
expected.add_argument('--foo' , default=__A , type=__A )
expected.add_argument('--bar' , default=__A , type=__A , help='help message' )
expected.add_argument('--baz' , default=__A , type=__A )
expected.add_argument('--ces' , nargs='+' , default=[] , type=__A )
expected.add_argument('--des' , nargs='+' , default=[] , type=__A )
__A = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__A )
for dataclass_type in dataclass_types:
__A = HfArgumentParser(__A )
self.argparsersEqual(__A , __A )
__A = parser.parse_args([] )
self.assertEqual(__A , Namespace(foo=__A , bar=__A , baz=__A , ces=[] , des=[] ) )
__A = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(__A , Namespace(foo=1_2 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def _lowerCamelCase ( self : Tuple ):
__A = HfArgumentParser(__A )
__A = argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=__A , required=__A )
expected.add_argument('--required_str' , type=__A , required=__A )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__A , )
self.argparsersEqual(__A , __A )
def _lowerCamelCase ( self : List[str] ):
__A = HfArgumentParser(__A )
__A = argparse.ArgumentParser()
expected.add_argument('--foo' , type=__A , required=__A )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__A , )
expected.add_argument('--opt' , type=__A , default=__A )
expected.add_argument('--baz' , default='toto' , type=__A , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__A )
self.argparsersEqual(__A , __A )
def _lowerCamelCase ( self : Any ):
__A = HfArgumentParser(__A )
__A = {
'foo': 1_2,
'bar': 3.14,
'baz': '42',
'flag': True,
}
__A = parser.parse_dict(__A )[0]
__A = BasicExample(**__A )
self.assertEqual(__A , __A )
def _lowerCamelCase ( self : Optional[Any] ):
__A = HfArgumentParser(__A )
__A = {
'foo': 1_2,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 4_2,
}
self.assertRaises(__A , parser.parse_dict , __A , allow_extra_keys=__A )
def _lowerCamelCase ( self : int ):
__A = HfArgumentParser(__A )
__A = {
'foo': 1_2,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__A = os.path.join(__A , 'temp_json' )
os.mkdir(__A )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(__A , __A )
__A = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0]
__A = BasicExample(**__A )
self.assertEqual(__A , __A )
def _lowerCamelCase ( self : int ):
__A = HfArgumentParser(__A )
__A = {
'foo': 1_2,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__A = os.path.join(__A , 'temp_yaml' )
os.mkdir(__A )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(__A , __A )
__A = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
__A = BasicExample(**__A )
self.assertEqual(__A , __A )
def _lowerCamelCase ( self : Dict ):
__A = HfArgumentParser(__A )
self.assertIsNotNone(__A )
| 700
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 434
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 370
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
if version.parse(scb.__version__) < version.parse("""1.4.12"""):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence"""),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""") , id="""references"""),
}) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
    def _compute(self, predictions, references, normalized=False, ignore_punct=False, support_zh_ja_chars=False, case_sensitive=False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized, no_punct=ignore_punct, asian_support=support_zh_ja_chars, case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
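# Illustration (added as a standalone sketch) of the reference transposition performed
# above: sacrebleu expects one stream per reference *position*, so the per-prediction
# reference lists are flipped before scoring.
_refs = [["ref 1a", "ref 1b"], ["ref 2a", "ref 2b"]]
_transformed = [[refs[i] for refs in _refs] for i in range(len(_refs[0]))]
assert _transformed == [["ref 1a", "ref 2a"], ["ref 1b", "ref 2b"]]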
| 370
| 1
|
"""simple docstring"""
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of an ASCII string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
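# Sanity check (illustrative): the result matches zlib's built-in Adler-32 for ASCII
# input; "Wikipedia" is the classic worked example with checksum 0x11E60398.
import zlib

assert adler32("Wikipedia") == zlib.adler32(b"Wikipedia") == 0x11E60398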
| 536
|
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results, samplerate):
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type, samplerate):
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('Gain (dB)')

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type, samplerate):
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('Phase shift (Radians)')
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
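# Usage sketch (illustrative): any object with a `process(sample) -> float` method
# satisfies the protocol above. An identity filter leaves the impulse unchanged, so
# its plotted magnitude response is flat at 0 dB.
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample


# show_frequency_response(IdentityFilter(), 48000)  # uncomment to plot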
| 536
| 1
|
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
'''text_branch''': '''text_model''',
'''audio_branch''': '''audio_model.audio_encoder''',
'''attn''': '''attention.self''',
'''self.proj''': '''output.dense''',
'''attention.self_mask''': '''attn_mask''',
'''mlp.fc1''': '''intermediate.dense''',
'''mlp.fc2''': '''output.dense''',
'''norm1''': '''layernorm_before''',
'''norm2''': '''layernorm_after''',
'''bn0''': '''batch_norm''',
}
processor = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''')
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
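    # Illustrative invocation (the script name and paths below are placeholders):
    #   python convert_clap_checkpoint.py --checkpoint_path ./clap.pt \
    #       --pytorch_dump_folder_path ./clap-hf --enable_fusion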
| 52
|
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
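    # Extra spot check (illustrative): for the identity matrix every vector is an
    # eigenvector with eigenvalue 1, so the Rayleigh quotient must be 1.
    eye = np.array([[1, 0], [0, 1]])
    v = np.array([[2], [1]])
    assert rayleigh_quotient(eye, v) == 1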
| 446
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 701
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attribute name expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25l')
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25h')
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
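# Usage sketch (illustrative): the cursor is restored even if the body raises.
#
#     with hide():
#         run_long_task()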
| 560
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4
| 38
|
'''simple docstring'''
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) form a twin-prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
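# Worked examples (quick checks added for illustration): 5 and 7 are both prime, so
# twin_prime(5) == 7, while 4 is not prime at all, so twin_prime(4) == -1.
assert twin_prime(5) == 7
assert twin_prime(4) == -1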
if __name__ == "__main__":
import doctest
doctest.testmod()
| 474
| 0
|
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class AlignTextConfig(PretrainedConfig):
    r"""Configuration for the ALIGN text encoder."""

    model_type = 'align_text_model'
def __init__( self ,_lowerCAmelCase=3_05_22 ,_lowerCAmelCase=7_68 ,_lowerCAmelCase=12 ,_lowerCAmelCase=12 ,_lowerCAmelCase=30_72 ,_lowerCAmelCase="gelu" ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=0.1 ,_lowerCAmelCase=5_12 ,_lowerCAmelCase=2 ,_lowerCAmelCase=0.02 ,_lowerCAmelCase=1E-12 ,_lowerCAmelCase=0 ,_lowerCAmelCase="absolute" ,_lowerCAmelCase=True ,**_lowerCAmelCase ,):
super().__init__(**_lowerCAmelCase )
lowerCamelCase__ = vocab_size
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = hidden_act
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = max_position_embeddings
lowerCamelCase__ = type_vocab_size
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = position_embedding_type
lowerCamelCase__ = use_cache
lowerCamelCase__ = pad_token_id
@classmethod
def UpperCamelCase_ ( cls ,_lowerCAmelCase ,**_lowerCAmelCase ):
cls._set_token_in_kwargs(_lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ = cls.get_config_dict(_lowerCAmelCase ,**_lowerCAmelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
lowerCamelCase__ = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowerCAmelCase ,**_lowerCAmelCase )
class AlignVisionConfig(PretrainedConfig):
    r"""Configuration for the ALIGN vision encoder."""

    model_type = 'align_vision_model'
def __init__( self ,_lowerCAmelCase = 3 ,_lowerCAmelCase = 6_00 ,_lowerCAmelCase = 2.0 ,_lowerCAmelCase = 3.1 ,_lowerCAmelCase = 8 ,_lowerCAmelCase = [3, 3, 5, 3, 5, 5, 3] ,_lowerCAmelCase = [32, 16, 24, 40, 80, 1_12, 1_92] ,_lowerCAmelCase = [16, 24, 40, 80, 1_12, 1_92, 3_20] ,_lowerCAmelCase = [] ,_lowerCAmelCase = [1, 2, 2, 2, 1, 2, 1] ,_lowerCAmelCase = [1, 2, 2, 3, 3, 4, 1] ,_lowerCAmelCase = [1, 6, 6, 6, 6, 6, 6] ,_lowerCAmelCase = 0.25 ,_lowerCAmelCase = "swish" ,_lowerCAmelCase = 25_60 ,_lowerCAmelCase = "mean" ,_lowerCAmelCase = 0.02 ,_lowerCAmelCase = 0.001 ,_lowerCAmelCase = 0.99 ,_lowerCAmelCase = 0.2 ,**_lowerCAmelCase ,):
super().__init__(**_lowerCAmelCase )
lowerCamelCase__ = num_channels
lowerCamelCase__ = image_size
lowerCamelCase__ = width_coefficient
lowerCamelCase__ = depth_coefficient
lowerCamelCase__ = depth_divisor
lowerCamelCase__ = kernel_sizes
lowerCamelCase__ = in_channels
lowerCamelCase__ = out_channels
lowerCamelCase__ = depthwise_padding
lowerCamelCase__ = strides
lowerCamelCase__ = num_block_repeats
lowerCamelCase__ = expand_ratios
lowerCamelCase__ = squeeze_expansion_ratio
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dim
lowerCamelCase__ = pooling_type
lowerCamelCase__ = initializer_range
lowerCamelCase__ = batch_norm_eps
lowerCamelCase__ = batch_norm_momentum
lowerCamelCase__ = drop_connect_rate
lowerCamelCase__ = sum(_lowerCAmelCase ) * 4
@classmethod
def UpperCamelCase_ ( cls ,_lowerCAmelCase ,**_lowerCAmelCase ):
cls._set_token_in_kwargs(_lowerCAmelCase )
lowerCamelCase__ , lowerCamelCase__ = cls.get_config_dict(_lowerCAmelCase ,**_lowerCAmelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("""model_type""" ) == "align":
lowerCamelCase__ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_lowerCAmelCase ,**_lowerCAmelCase )
class AlignConfig(PretrainedConfig):
    r"""Composite configuration holding an AlignTextConfig and an AlignVisionConfig."""

    model_type = 'align'
    is_composition = True
def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=6_40 ,_lowerCAmelCase=1.0 ,_lowerCAmelCase=0.02 ,**_lowerCAmelCase ,):
super().__init__(**_lowerCAmelCase )
if text_config is None:
lowerCamelCase__ = {}
logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" )
if vision_config is None:
lowerCamelCase__ = {}
logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" )
lowerCamelCase__ = AlignTextConfig(**_lowerCAmelCase )
lowerCamelCase__ = AlignVisionConfig(**_lowerCAmelCase )
lowerCamelCase__ = projection_dim
lowerCamelCase__ = temperature_init_value
lowerCamelCase__ = initializer_range
@classmethod
def UpperCamelCase_ ( cls ,_lowerCAmelCase ,_lowerCAmelCase ,**_lowerCAmelCase ):
return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**_lowerCAmelCase )
def UpperCamelCase_ ( self ):
lowerCamelCase__ = copy.deepcopy(self.__dict__ )
lowerCamelCase__ = self.text_config.to_dict()
lowerCamelCase__ = self.vision_config.to_dict()
lowerCamelCase__ = self.__class__.model_type
return output
| 9
|
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Calculate hypothesis value minus actual output for a given example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Compute the hypothesis (predicted output) for one input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
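A vectorized numpy sketch of one update of the batch gradient descent implemented above (standalone and illustrative, not part of the original file): with hypothesis h(x) = p0 + p1*x1 + p2*x2 + p3*x3, every parameter moves against the mean error gradient over the training set.

import numpy as np

X = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1], [11, 12, 13]])
y = np.array([15, 25, 41, 8, 41])
p = np.array([2.0, 4.0, 1.0, 5.0])  # [bias, w1, w2, w3], same start as above
lr = 0.009

errors = X @ p[1:] + p[0] - y             # h(x) - y for every training example
grad = np.concatenate(([errors.mean()],   # d/d(bias): mean error
                       (errors[:, None] * X).mean(axis=0)))  # d/d(w_i)
p -= lr * grad                            # one vectorized update step
print(p)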
| 9
| 1
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
UpperCamelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase_ : Dict = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase_ : Any = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
UpperCamelCase_ : Tuple = {
"""facebook/blenderbot_small-90M""": 512,
}
class __lowercase ( __snake_case ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets,
            ),
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """Create a mask of zeros: BlenderbotSmall does not use token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
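A tiny standalone sketch of the special-token layout that build_inputs_with_special_tokens above produces (the token ids here are fake, for illustration only):

bos, eos = 0, 2  # hypothetical ids, not the real vocabulary values

def layout(token_ids_0, token_ids_1=None):
    # mirrors build_inputs_with_special_tokens above
    output = [bos] + token_ids_0 + [eos]
    if token_ids_1 is None:
        return output
    return output + [eos] + token_ids_1 + [eos]

print(layout([5, 6]))       # [0, 5, 6, 2]         -> <bos> seq0 <eos>
print(layout([5, 6], [7]))  # [0, 5, 6, 2, 2, 7, 2] -> <bos> seq0 <eos> <eos> seq1 <eos>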
| 461
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ : Optional[int] = logging.get_logger(__name__)
class __lowercase ( __snake_case ):
_A = "timm_backbone"
def __init__(self : Any , snake_case : List[Any]=None , snake_case : int=3 , snake_case : Dict=True , snake_case : Union[str, Any]=True , snake_case : Any=None , **snake_case : int , ) -> Optional[int]:
super().__init__(**snake_case )
_lowercase : Dict = backbone
_lowercase : Optional[Any] = num_channels
_lowercase : Union[str, Any] = features_only
_lowercase : Tuple = use_pretrained_backbone
_lowercase : List[str] = True
_lowercase : Tuple = out_indices if out_indices is not None else (-1,)
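An illustrative instantiation of the config above (assumes the standard transformers export; the backbone name is just an example value):

from transformers import TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet50", out_indices=(2, 3, 4))
print(config.backbone, config.out_indices, config.use_timm_backbone)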
| 461
| 1
|
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__a : Any = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( __snake_case ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='AutoTokenizer'
_SCREAMING_SNAKE_CASE =['tokenizer']
_SCREAMING_SNAKE_CASE ={
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
    def __init__(self, tokenizer, speaker_embeddings=None):
        '''simple docstring'''
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
@classmethod
def lowercase ( cls: Optional[int] , __A: Optional[Any] , __A: Optional[Any]="speaker_embeddings_path.json" , **__A: Union[str, Any] ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
a__ = get_file_from_repo(
__A , __A , subfolder=kwargs.pop('''subfolder''' , __A ) , cache_dir=kwargs.pop('''cache_dir''' , __A ) , force_download=kwargs.pop('''force_download''' , __A ) , proxies=kwargs.pop('''proxies''' , __A ) , resume_download=kwargs.pop('''resume_download''' , __A ) , local_files_only=kwargs.pop('''local_files_only''' , __A ) , use_auth_token=kwargs.pop('''use_auth_token''' , __A ) , revision=kwargs.pop('''revision''' , __A ) , )
if speaker_embeddings_path is None:
logger.warning(
F'`{os.path.join(__A , __A )}` does not exist, no preloaded speaker embeddings will be used - make sure to provide a correct path to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
a__ = None
else:
with open(__A ) as speaker_embeddings_json:
a__ = json.load(__A )
else:
a__ = None
a__ = AutoTokenizer.from_pretrained(__A , **__A )
return cls(tokenizer=__A , speaker_embeddings=__A )
def lowercase ( self: Union[str, Any] , __A: Optional[int] , __A: List[Any]="speaker_embeddings_path.json" , __A: List[Any]="speaker_embeddings" , __A: bool = False , **__A: Union[str, Any] , ):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__A , __A , '''v2''' ) , exist_ok=__A )
a__ = {}
a__ = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
a__ = self._load_voice_preset(__A )
a__ = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['''repo_or_path'''] , __A , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=__A , )
a__ = os.path.join(__A , F'{prompt_key}_{key}.npy' )
a__ = tmp_dict
with open(os.path.join(__A , __A ) , '''w''' ) as fp:
json.dump(__A , __A )
super().save_pretrained(__A , __A , **__A )
    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        '''simple docstring'''
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
            path = get_file_from_repo(
                self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , None ) , cache_dir=kwargs.pop('''cache_dir''' , None ) , force_download=kwargs.pop('''force_download''' , False ) , proxies=kwargs.pop('''proxies''' , None ) , resume_download=kwargs.pop('''resume_download''' , False ) , local_files_only=kwargs.pop('''local_files_only''' , False ) , use_auth_token=kwargs.pop('''use_auth_token''' , None ) , revision=kwargs.pop('''revision''' , None ) , )
            if path is None:
                raise ValueError(
                    f'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist, no preloaded voice preset will be used - make sure to provide correct paths to the {voice_preset} embeddings.' )
            voice_preset_dict[key] = np.load(path)
        return voice_preset_dict
def lowercase ( self: Optional[int] , __A: Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self: Optional[int] , __A: Union[str, Any]=None , __A: str=None , __A: List[Any]="pt" , __A: Any=256 , __A: Tuple=False , __A: Tuple=True , __A: Any=False , **__A: Dict , ):
'''simple docstring'''
if voice_preset is not None and not isinstance(__A , __A ):
if (
isinstance(__A , __A )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
a__ = self._load_voice_preset(__A )
else:
if isinstance(__A , __A ) and not voice_preset.endswith('''.npz''' ):
a__ = voice_preset + '''.npz'''
a__ = np.load(__A )
if voice_preset is not None:
self._validate_voice_preset_dict(__A , **__A )
a__ = BatchFeature(data=__A , tensor_type=__A )
a__ = self.tokenizer(
__A , return_tensors=__A , padding='''max_length''' , max_length=__A , return_attention_mask=__A , return_token_type_ids=__A , add_special_tokens=__A , **__A , )
if voice_preset is not None:
a__ = voice_preset
return encoded_text
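A sketch of the voice-preset structure that the processor validates above: per preset_shape, the semantic prompt is 1-D while the coarse and fine prompts are 2-D. The arrays below are random placeholders, not a real preset:

import numpy as np

voice_preset = {
    "semantic_prompt": np.zeros(10, dtype=np.int64),     # 1-D token ids
    "coarse_prompt": np.zeros((2, 10), dtype=np.int64),  # 2-D codebook ids
    "fine_prompt": np.zeros((8, 10), dtype=np.int64),    # 2-D codebook ids
}
for key, arr in voice_preset.items():
    print(key, arr.ndim)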
| 200
|
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self: List[str] , __A: str , __A: Any=2 , __A: str=3 , __A: Tuple=4 , __A: Dict=2 , __A: List[Any]=7 , __A: Any=True , __A: Any=True , __A: List[str]=True , __A: Optional[int]=True , __A: Optional[int]=99 , __A: Tuple=36 , __A: List[str]=2 , __A: Dict=4 , __A: List[str]=37 , __A: Optional[int]="gelu" , __A: Optional[int]=0.1 , __A: Tuple=0.1 , __A: List[Any]=512 , __A: List[str]=16 , __A: Any=2 , __A: Union[str, Any]=0.0_2 , __A: Optional[int]=6 , __A: Union[str, Any]=6 , __A: Union[str, Any]=3 , __A: Tuple=4 , __A: Optional[int]=None , __A: Optional[Any]=1000 , ):
'''simple docstring'''
a__ = parent
a__ = batch_size
a__ = num_channels
a__ = image_size
a__ = patch_size
a__ = is_training
a__ = use_input_mask
a__ = use_token_type_ids
a__ = use_labels
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = coordinate_size
a__ = shape_size
a__ = num_labels
a__ = num_choices
a__ = scope
a__ = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
a__ = text_seq_length
a__ = (image_size // patch_size) ** 2 + 1
a__ = self.text_seq_length + self.image_seq_length
def lowercase ( self: Optional[int] ):
'''simple docstring'''
a__ = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
a__ = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
a__ = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
a__ = bbox[i, j, 3]
a__ = bbox[i, j, 1]
a__ = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
a__ = bbox[i, j, 2]
a__ = bbox[i, j, 0]
a__ = tmp_coordinate
a__ = tf.constant(__A )
a__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.text_seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
a__ = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase ( self: List[str] , __A: List[str] , __A: List[str] , __A: List[str] , __A: int , __A: Any , __A: Any ):
'''simple docstring'''
a__ = TFLayoutLMvaModel(config=__A )
# text + image
a__ = model(__A , pixel_values=__A , training=__A )
a__ = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , training=__A , )
a__ = model(__A , bbox=__A , pixel_values=__A , training=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
a__ = model(__A , training=__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
a__ = model({'''pixel_values''': pixel_values} , training=__A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase ( self: Optional[int] , __A: Any , __A: str , __A: List[str] , __A: List[str] , __A: List[str] , __A: Optional[Any] , __A: Any ):
'''simple docstring'''
a__ = self.num_labels
a__ = TFLayoutLMvaForSequenceClassification(config=__A )
a__ = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , labels=__A , training=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self: str , __A: List[str] , __A: int , __A: str , __A: Any , __A: str , __A: str , __A: str ):
'''simple docstring'''
a__ = self.num_labels
a__ = TFLayoutLMvaForTokenClassification(config=__A )
a__ = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , labels=__A , training=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase ( self: Union[str, Any] , __A: Any , __A: List[Any] , __A: Any , __A: List[str] , __A: Any , __A: Optional[int] , __A: Tuple ):
'''simple docstring'''
a__ = 2
a__ = TFLayoutLMvaForQuestionAnswering(config=__A )
a__ = model(
__A , bbox=__A , pixel_values=__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , training=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self: int ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
a__ = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE =(
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_SCREAMING_SNAKE_CASE =(
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =False
def lowercase ( self: List[str] , __A: Dict , __A: Optional[Any] , __A: str , __A: Union[str, Any] , __A: Optional[Any] ):
'''simple docstring'''
return True
def lowercase ( self: Dict , __A: Optional[Any] , __A: Any , __A: List[Any]=False ):
'''simple docstring'''
a__ = copy.deepcopy(__A )
if model_class in get_values(__A ):
a__ = {
k: tf.tile(tf.expand_dims(__A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(__A , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__A ):
                a__ = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(__A ):
                a__ = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                a__ = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(__A ):
                a__ = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(__A ):
                a__ = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
return inputs_dict
def lowercase ( self: List[str] ):
'''simple docstring'''
a__ = TFLayoutLMvaModelTester(self )
a__ = ConfigTester(self , config_class=__A , hidden_size=37 )
def lowercase ( self: Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase ( self: Any ):
'''simple docstring'''
a__ ,a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(__A )
if getattr(__A , '''hf_compute_loss''' , __A ):
# The number of elements in the loss should be the same as the number of elements in the label
a__ = self._prepare_for_class(inputs_dict.copy() , __A , return_labels=__A )
a__ = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=__A )[0]
]
a__ = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
a__ = self._prepare_for_class(inputs_dict.copy() , __A , return_labels=__A )
a__ = prepared_for_class.pop('''input_ids''' )
a__ = model(__A , **__A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
a__ = self._prepare_for_class(inputs_dict.copy() , __A , return_labels=__A )
a__ = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
a__ = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
a__ = -100
a__ = tf.convert_to_tensor(__A )
a__ = model(__A , **__A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
a__ = self._prepare_for_class(inputs_dict.copy() , __A , return_labels=__A )
a__ = model(__A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
a__ = self._prepare_for_class(inputs_dict.copy() , __A , return_labels=__A )
# Get keys that were added with the _prepare_for_class function
a__ = prepared_for_class.keys() - inputs_dict.keys()
a__ = inspect.signature(model.call ).parameters
a__ = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
a__ = {0: '''input_ids'''}
for label_key in label_keys:
a__ = signature_names.index(__A )
a__ = label_key
a__ = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
a__ = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
a__ = prepared_for_class[value]
a__ = tuple(__A )
# Send to model
a__ = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowercase ( self: Optional[int] ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
def lowercase ( self: Dict ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
def lowercase ( self: int ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels )
def lowercase ( self: List[str] ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels )
def lowercase ( self: Tuple ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels )
@slow
def lowercase ( self: Union[str, Any] ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = TFLayoutLMvaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase ( self: Any ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=__A ) if is_vision_available() else None
@slow
def lowercase ( self: Union[str, Any] ):
'''simple docstring'''
a__ = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
a__ = self.default_image_processor
a__ = prepare_img()
a__ = image_processor(images=__A , return_tensors='''tf''' ).pixel_values
a__ = tf.constant([[1, 2]] )
a__ = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
a__ = model(input_ids=__A , bbox=__A , pixel_values=__A , training=__A )
# verify the logits
a__ = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , __A )
a__ = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , __A , atol=1e-4 ) )
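The nested-loop coordinate swap in the tester above guarantees x0 <= x1 and y0 <= y1 for every random box; a vectorized numpy sketch of the same "legalization" step (standalone and illustrative):

import numpy as np

rng = np.random.default_rng(0)
bbox = rng.integers(0, 1000, size=(2, 4, 4))  # (batch, seq, [x0, y0, x1, y1])
x_lo = np.minimum(bbox[..., 0], bbox[..., 2])
x_hi = np.maximum(bbox[..., 0], bbox[..., 2])
y_lo = np.minimum(bbox[..., 1], bbox[..., 3])
y_hi = np.maximum(bbox[..., 1], bbox[..., 3])
bbox = np.stack([x_lo, y_lo, x_hi, y_hi], axis=-1)
assert (bbox[..., 2] >= bbox[..., 0]).all() and (bbox[..., 3] >= bbox[..., 1]).all()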
| 200
| 1
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __snake_case ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_lowerCamelCase = ["""image_processor""", """tokenizer"""]
_lowerCamelCase = """AutoImageProcessor"""
_lowerCamelCase = """AutoTokenizer"""
def __init__( self , __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
super().__init__(__lowerCamelCase , __lowerCamelCase )
__A : Optional[int] = self.image_processor
def __call__( self , __lowerCamelCase=None , __lowerCamelCase=None , __lowerCamelCase=None , **__lowerCamelCase ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__A : List[str] = self.tokenizer(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
if images is not None:
__A : Union[str, Any] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase )
if text is not None and images is not None:
__A : Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCamelCase ) , tensor_type=__lowerCamelCase )
def UpperCamelCase__( self , *__lowerCamelCase , **__lowerCamelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase__( self , *__lowerCamelCase , **__lowerCamelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
def UpperCamelCase__( self ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 177
|
"""simple docstring"""
import requests
a_ = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def __lowercase ( snake_case_ : str ) ->None:
'''simple docstring'''
__A : str = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] ,1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 177
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    def __init__(self, size: int):
        '''simple docstring'''
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        '''simple docstring'''
        return iter(self._graph[vertex])

    @property
    def size(self):
        '''simple docstring'''
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        '''simple docstring'''
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        '''simple docstring'''
        # 0-1 BFS: a deque replaces the priority queue of Dijkstra; 0-weight
        # edges go to the front, 1-weight edges to the back
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
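A worked example for the 0-1 BFS above, using the classes defined in this file (the graph is illustrative):

#     0 --1--> 1 --0--> 2
#     0 --0--> 2
# The shortest path 0 -> 2 has cost 0 via the zero-weight edge.
g = AdjacencyList(3)
g.add_edge(0, 1, 1)
g.add_edge(1, 2, 0)
g.add_edge(0, 2, 0)
print(g.get_shortest_path(0, 2))  # 0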
| 710
|
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__(self , _lowercase , _lowercase = 13 , _lowercase = 64 , _lowercase = 2 , _lowercase = 3 , _lowercase = 3 , _lowercase = True , _lowercase = True , _lowercase = 128 , _lowercase=[16, 32, 64, 128] , _lowercase = 7 , _lowercase = 4 , _lowercase = 37 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 10 , _lowercase = 0.02 , _lowercase = 2 , _lowercase = 1 , _lowercase = 128 , _lowercase = [2, 2, 2, 2] , _lowercase = 2 , _lowercase = 2 , ):
'''simple docstring'''
__a : str = parent
__a : List[Any] = batch_size
__a : int = image_size
__a : Tuple = patch_size
__a : str = num_channels
__a : Union[str, Any] = is_training
__a : List[Any] = use_labels
__a : int = hidden_size
__a : Optional[Any] = num_hidden_layers
__a : List[Any] = num_attention_heads
__a : Dict = intermediate_size
__a : str = hidden_act
__a : Dict = hidden_dropout_prob
__a : str = attention_probs_dropout_prob
__a : Optional[int] = type_sequence_label_size
__a : Dict = initializer_range
__a : Dict = encoder_stride
__a : int = num_attention_outputs
__a : List[Any] = embed_dim
__a : Optional[Any] = embed_dim + 1
__a : Optional[Any] = resolution
__a : Optional[Any] = depths
__a : Union[str, Any] = hidden_sizes
__a : List[str] = dim
__a : Any = mlp_expansion_ratio
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : str = None
if self.use_labels:
__a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : List[str] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__(self ):
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : Optional[Any] = TFEfficientFormerModel(config=_lowercase )
__a : List[Any] = model(_lowercase , training=_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : Optional[Any] = self.type_sequence_label_size
__a : Any = TFEfficientFormerForImageClassification(_lowercase )
__a : Union[str, Any] = model(_lowercase , labels=_lowercase , training=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a : Optional[Any] = 1
__a : int = TFEfficientFormerForImageClassification(_lowercase )
__a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a : str = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Any = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , unittest.TestCase ):
_lowerCAmelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_lowerCAmelCase = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = TFEfficientFormerModelTester(self )
__a : Any = ConfigTester(
self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def lowerCAmelCase__(self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(_lowercase )
__a : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[Any] = [*signature.parameters.keys()]
__a : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
__a : Tuple = model_class(_lowercase )
__a : int = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a : str = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__a : Any = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__a : int = seq_length * self.model_tester.chunk_length
else:
__a : Any = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__a : Optional[int] = outputs.decoder_hidden_states
self.assertIsInstance(_lowercase , (list, tuple) )
self.assertEqual(len(_lowercase ) , _lowercase )
__a : Any = getattr(self.model_tester , """seq_length""" , _lowercase )
__a : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : int = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase=False ):
'''simple docstring'''
__a : Any = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def lowerCAmelCase__(self ):
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Union[str, Any] = TFEfficientFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : int = True
__a : Optional[int] = getattr(self.model_tester , """seq_length""" , _lowercase )
__a : Dict = getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
__a : Dict = getattr(self.model_tester , """key_length""" , _lowercase )
__a : int = getattr(self.model_tester , """chunk_length""" , _lowercase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__a : List[str] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__a : List[Any] = True
__a : Tuple = False
__a : List[Any] = True
__a : int = model_class(_lowercase )
__a : List[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a : Optional[Any] = True
__a : List[str] = model_class(_lowercase )
__a : Dict = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__a : Dict = model_class(_lowercase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__a : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowercase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__a : Optional[Any] = model(_lowercase )
self.assertTrue(outputs_dict is not None )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__(self ):
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : str = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
__a : Optional[Any] = self.default_image_processor
__a : List[str] = prepare_img()
__a : int = image_processor(images=_lowercase , return_tensors="""tf""" )
# forward pass
__a : Optional[Any] = model(**_lowercase , training=_lowercase )
# verify the logits
__a : str = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowercase )
__a : Dict = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
@slow
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Any = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__a : Any = self.default_image_processor
__a : str = prepare_img()
__a : str = image_processor(images=_lowercase , return_tensors="""tf""" )
# forward pass
__a : List[Any] = model(**_lowercase , training=_lowercase )
# verify the logits
__a : int = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowercase )
__a : List[str] = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
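The integration tests above pin a small "expected slice" of the output tensor and compare it within a tolerance rather than checking the whole tensor; a standalone sketch of that pattern (values copied from the first test, illustrative only):

import numpy as np

logits = np.array([-0.0555, 0.4825, -0.0852])          # first 3 model logits
expected_slice = np.array([-0.0555, 0.4825, -0.0852])  # pinned reference values
assert np.allclose(logits, expected_slice, atol=1e-4)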
| 63
| 0
|
'''simple docstring'''
import operator as op
lowerCAmelCase = """scaler.pt"""
lowerCAmelCase = """pytorch_model"""
lowerCAmelCase = """random_states"""
lowerCAmelCase = """optimizer"""
lowerCAmelCase = """scheduler"""
lowerCAmelCase = """pytorch_model.bin"""
lowerCAmelCase = """pytorch_model.bin.index.json"""
lowerCAmelCase = """model.safetensors"""
lowerCAmelCase = """model.safetensors.index.json"""
lowerCAmelCase = """1.10.2"""
lowerCAmelCase = """py38"""
lowerCAmelCase = """4.17.0"""
lowerCAmelCase = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
lowerCAmelCase = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
lowerCAmelCase = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
lowerCAmelCase = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
lowerCAmelCase = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
lowerCAmelCase = """2.0.1"""
lowerCAmelCase = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
lowerCAmelCase = ["""default""", """reduce-overhead""", """max-autotune"""]
lowerCAmelCase = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
lowerCAmelCase = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
lowerCAmelCase = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
lowerCAmelCase = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 292
|
'''simple docstring'''
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Depth First Search on a directed graph given as an adjacency dict."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    """A""": ["""B""", """C""", """D"""],
    """B""": ["""A""", """D""", """E"""],
    """C""": ["""A""", """F"""],
    """D""": ["""B""", """D"""],
    """E""": ["""B""", """F"""],
    """F""": ["""C""", """E""", """G"""],
    """G""": ["""F"""],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, """A"""))
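For contrast with the DFS above: swapping the LIFO stack for a FIFO queue turns the same loop into breadth-first search (illustrative sketch, reusing the graph G defined above):

from collections import deque

def breadth_first_search(graph: dict, start: str) -> set[str]:
    explored, queue = {start}, deque([start])
    while queue:
        v = queue.popleft()  # pop first element instead of last
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)
                queue.append(adj)
    return explored

print(breadth_first_search(G, """A"""))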
| 292
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["MobileViTFeatureExtractor"]
lowercase_ = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
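A toy sketch of the lazy-module idea used above — an assumption about the intent, not the actual _LazyModule implementation: attribute access triggers the submodule import on demand, so importing a config name does not pull in torch or tensorflow.

import importlib
import types

class LazySketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)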
| 65
|
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] ) -> Any:
if isinstance(lowerCAmelCase__ , torch.Tensor ):
return image
elif isinstance(lowerCAmelCase__ , PIL.Image.Image ):
__a = [image]
if isinstance(image[0] , PIL.Image.Image ):
__a = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
__a = np.concatenate(lowerCAmelCase__ , axis=0 )
        __a = np.array(lowerCAmelCase__ ).astype(np.float32 ) / 255.0
__a = image.transpose(0 , 3 , 1 , 2 )
__a = 2.0 * image - 1.0
__a = torch.from_numpy(lowerCAmelCase__ )
elif isinstance(image[0] , torch.Tensor ):
__a = torch.cat(lowerCAmelCase__ , dim=0 )
return image
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int]=0.99_95 ) -> int:
if not isinstance(lowerCAmelCase__ , np.ndarray ):
__a = True
__a = va.device
__a = va.cpu().numpy()
__a = va.cpu().numpy()
__a = np.sum(va * va / (np.linalg.norm(lowerCAmelCase__ ) * np.linalg.norm(lowerCAmelCase__ )) )
if np.abs(lowerCAmelCase__ ) > DOT_THRESHOLD:
__a = (1 - t) * va + t * va
else:
__a = np.arccos(lowerCAmelCase__ )
__a = np.sin(lowerCAmelCase__ )
__a = theta_a * t
__a = np.sin(lowerCAmelCase__ )
__a = np.sin(theta_a - theta_t ) / sin_theta_a
__a = sin_theta_t / sin_theta_a
__a = sa * va + sa * va
if inputs_are_torch:
__a = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
return va
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] ) -> int:
__a = F.normalize(lowerCAmelCase__ , dim=-1 )
__a = F.normalize(lowerCAmelCase__ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any ) -> List[str]:
for param in model.parameters():
__a = value
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a=None , _a=None , _a=None , ):
super().__init__()
self.register_modules(
vae=_a , text_encoder=_a , clip_model=_a , tokenizer=_a , unet=_a , scheduler=_a , feature_extractor=_a , coca_model=_a , coca_tokenizer=_a , coca_transform=_a , )
__a = (
feature_extractor.size
if isinstance(feature_extractor.size , _a )
else feature_extractor.size['''shortest_edge''']
)
__a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _a )
set_requires_grad(self.clip_model , _a )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.vae , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.vae , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.unet , _a )
def __UpperCAmelCase ( self ):
set_requires_grad(self.unet , _a )
def __UpperCAmelCase ( self , _a , _a , _a ):
# get the original timestep using init_timestep
__a = min(int(num_inference_steps * strength ) , _a )
__a = max(num_inference_steps - init_timestep , 0 )
__a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a=None ):
if not isinstance(_a , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(_a )}''' )
__a = image.to(device=_a , dtype=_a )
if isinstance(_a , _a ):
__a = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_a )
]
__a = torch.cat(_a , dim=0 )
else:
__a = self.vae.encode(_a ).latent_dist.sample(_a )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        __a = 0.18215 * init_latents
__a = init_latents.repeat_interleave(_a , dim=0 )
__a = randn_tensor(init_latents.shape , generator=_a , device=_a , dtype=_a )
# get latents
__a = self.scheduler.add_noise(_a , _a , _a )
__a = init_latents
return latents
def __UpperCAmelCase ( self , _a ):
__a = self.coca_transform(_a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__a = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def __UpperCAmelCase ( self , _a , _a ):
__a = self.feature_extractor.preprocess(_a )
__a = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
__a = self.clip_model.get_image_features(_a )
__a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a )
__a = image_embeddings_clip.repeat_interleave(_a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , ):
__a = latents.detach().requires_grad_()
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a , encoder_hidden_states=_a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__a = self.scheduler.alphas_cumprod[timestep]
__a = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__a = torch.sqrt(_a )
__a = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _a ):
__a = self.scheduler.sigmas[index]
__a = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        __a = 1 / 0.18215 * sample
__a = self.vae.decode(_a ).sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = transforms.Resize(self.feature_extractor_size )(_a )
__a = self.normalize(_a ).to(latents.dtype )
__a = self.clip_model.get_image_features(_a )
__a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a )
__a = spherical_dist_loss(_a , _a ).mean() * clip_guidance_scale
__a = -torch.autograd.grad(_a , _a )[0]
if isinstance(self.scheduler , _a ):
__a = latents.detach() + grads * (sigma**2)
__a = noise_pred_original
else:
__a = noise_pred_original - torch.sqrt(_a ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , _a , _a , _a = None , _a = None , _a = 512 , _a = 512 , _a = 0.6 , _a = 50 , _a = 7.5 , _a = 1 , _a = 0.0 , _a = 100 , _a = None , _a = "pil" , _a = True , _a = 0.8 , _a = 0.1 , _a = 0.1 , ):
if isinstance(_a , _a ) and len(_a ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(_a )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(_a , torch.Generator ) and batch_size > 1:
__a = [generator] + [None] * (batch_size - 1)
__a = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
__a = [x[0] for x in coca_is_none if x[1]]
__a = ''', '''.join(_a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_a ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__a = self.get_image_description(_a )
if style_prompt is None:
if len(_a ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__a = self.get_image_description(_a )
# get prompt text embeddings for content and style
__a = self.tokenizer(
_a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
__a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__a = self.tokenizer(
_a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
__a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__a = slerp(_a , _a , _a )
# duplicate text embeddings for each generation per prompt
__a = text_embeddings.repeat_interleave(_a , dim=0 )
# set timesteps
__a = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__a = {}
if accepts_offset:
__a = 1
self.scheduler.set_timesteps(_a , **_a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__a , __a = self.get_timesteps(_a , _a , self.device )
__a = timesteps[:1].repeat(_a )
# Preprocess image
__a = preprocess(_a , _a , _a )
__a = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
__a = preprocess(_a , _a , _a )
__a = self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
__a = slerp(_a , _a , _a )
if clip_guidance_scale > 0:
__a = self.get_clip_image_embeddings(_a , _a )
__a = self.get_clip_image_embeddings(_a , _a )
__a = slerp(
_a , _a , _a )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__a = content_text_input.input_ids.shape[-1]
__a = self.tokenizer([''''''] , padding='''max_length''' , max_length=_a , return_tensors='''pt''' )
__a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__a = uncond_embeddings.repeat_interleave(_a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__a = torch.randn(_a , generator=_a , device='''cpu''' , dtype=_a ).to(
self.device )
else:
__a = torch.randn(_a , generator=_a , device=self.device , dtype=_a )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
# check if the scheduler accepts generator
__a = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__a = generator
with self.progress_bar(total=_a ):
for i, t in enumerate(_a ):
# expand the latents if we are doing classifier free guidance
__a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a , encoder_hidden_states=_a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__a , __a = noise_pred.chunk(2 )
__a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__a , __a = self.cond_fn(
_a , _a , _a , _a , _a , _a , _a , )
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__a = 1 / 0.18215 * latents
__a = self.vae.decode(_a ).sample
__a = (image / 2 + 0.5).clamp(0 , 1 )
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_a , nsfw_content_detected=_a )
| 65
| 1
|
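# Editor's note: the cond_fn above computes a spherical_dist_loss between the CLIP image
# and text embeddings. A minimal, self-contained sketch of that loss (my reconstruction,
# not necessarily the pipeline's exact source) looks like this:
import torch
import torch.nn.functional as F


def spherical_dist_loss(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # Project both embeddings onto the unit sphere, then square the great-circle
    # (geodesic) distance recovered from the chord length between them.
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)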
"""simple docstring"""
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a: list) -> None:
    """Sort the list ``a`` of integers in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(x) for x in a))


if __name__ == "__main__":
    main()
| 4
|
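# Editor's note: pigeonhole sort runs in O(n + k) time and O(k) extra space, where
# k = max(a) - min(a) + 1, so it only pays off when the value range is small.
# A quick sanity check of the in-place sort above:
data = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(data)
assert data == [2, 3, 4, 6, 7, 8, 8]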
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 339
| 0
|
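# Editor's note: x * sigmoid(1.702 * x) above is the sigmoid approximation of GELU.
# A quick numerical comparison against the exact GELU (x * Phi(x), with Phi the
# standard normal CDF), assuming SciPy is available:
import numpy as np
from scipy.special import erf


def exact_gelu(x: np.ndarray) -> np.ndarray:
    return 0.5 * x * (1.0 + erf(x / np.sqrt(2.0)))


xs = np.linspace(-4.0, 4.0, 801)
approx = xs / (1.0 + np.exp(-1.702 * xs))
print(np.max(np.abs(exact_gelu(xs) - approx)))  # on the order of 1e-2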
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case_ : Union[str, Any] = """▁"""
snake_case_ : List[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = BigBirdTokenizer
SCREAMING_SNAKE_CASE__ = BigBirdTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
UpperCAmelCase : str = self.tokenizer_class(lowercase , keep_accents=lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = "<s>"
UpperCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(lowercase ) , 10_04 )
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
UpperCAmelCase : Tuple = self.get_tokenizer()
UpperCAmelCase : Tuple = self.get_rust_tokenizer()
UpperCAmelCase : Dict = "I was born in 92000, and this is falsé."
UpperCAmelCase : Optional[int] = tokenizer.tokenize(lowercase )
UpperCAmelCase : Tuple = rust_tokenizer.tokenize(lowercase )
self.assertListEqual(lowercase , lowercase )
UpperCAmelCase : List[str] = tokenizer.encode(lowercase , add_special_tokens=lowercase )
UpperCAmelCase : Optional[int] = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
UpperCAmelCase : Tuple = self.get_rust_tokenizer()
UpperCAmelCase : Dict = tokenizer.encode(lowercase )
UpperCAmelCase : List[str] = rust_tokenizer.encode(lowercase )
self.assertListEqual(lowercase , lowercase )
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase : Dict = BigBirdTokenizer(lowercase , keep_accents=lowercase )
UpperCAmelCase : int = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase ) , [2_85, 46, 10, 1_70, 3_82] , )
UpperCAmelCase : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(
lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase : str = "Hello World!"
UpperCAmelCase : Union[str, Any] = [65, 1_85_36, 22_60, 1_01, 66]
self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) )
@slow
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase : List[str] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
UpperCAmelCase : Tuple = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowercase , self.big_tokenizer.encode(lowercase ) )
@require_torch
@slow
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase : Optional[Any] = " ".join(lowercase )
UpperCAmelCase : List[Any] = self.big_tokenizer.encode_plus(lowercase , return_tensors="pt" , return_token_type_ids=lowercase )
UpperCAmelCase : Tuple = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=lowercase )
UpperCAmelCase : Optional[Any] = BigBirdConfig(attention_type="original_full" )
UpperCAmelCase : List[Any] = BigBirdModel(lowercase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase )
model(**lowercase )
@slow
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
UpperCAmelCase : Any = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = {"input_ids": [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 292
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32,
                 attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0,
                 eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Fall back to derived defaults when the sizes are not given explicitly.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
| 292
| 1
|
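# Editor's note: a quick check of the derived defaults in the config above,
# assuming the standard `transformers` RwkvConfig API:
from transformers import RwkvConfig

cfg = RwkvConfig(hidden_size=768)
print(cfg.attention_hidden_size)  # 768  -> falls back to hidden_size
print(cfg.intermediate_size)      # 3072 -> falls back to 4 * hidden_size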
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module) -> None:
    """Disable gradient updates for every parameter of ``module``."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Pick the best available torch device: CUDA, then MPS, then CPU."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display ``image`` with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current time as an HH:MM:SS string."""
    current_time = datetime.now()
    return current_time.strftime("%H:%M:%S")
| 32
|
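# Editor's note: hypothetical usage of freeze_module above -- freeze a backbone and
# verify no parameter will receive gradients (the Linear layer is just a stand-in):
import torch.nn as nn

backbone = nn.Linear(16, 8)
freeze_module(backbone)
print(all(not p.requires_grad for p in backbone.parameters()))  # True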
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct the model from the default or user-supplied config
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
__lowerCAmelCase : Tuple = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 529
| 0
|
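# Editor's note: hypothetical programmatic use of the converter above, mirroring
# the CLI flags; both paths are placeholders:
convert_gpt2_checkpoint_to_pytorch(
    gpt2_checkpoint_path="/tmp/gpt2_tf/model.ckpt",
    gpt2_config_file="",  # empty string -> default GPT2Config()
    pytorch_dump_folder_path="/tmp/gpt2_pt",
)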
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = 'hf-internal-testing/tiny-random-bert'
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
FULL_COMMIT_HASH = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : str ) -> List[str]:
_lowerCamelCase = cached_file(snake_case__ , snake_case__ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(snake_case__ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(snake_case__ , snake_case__ ) ) )
with open(os.path.join(snake_case__ , 'refs' , 'main' ) ) as f:
_lowerCamelCase = f.read()
self.assertEqual(snake_case__ , os.path.join(snake_case__ , 'snapshots' , snake_case__ , snake_case__ ) )
self.assertTrue(os.path.isfile(snake_case__ ) )
# File is cached at the same place the second time.
_lowerCamelCase = cached_file(snake_case__ , snake_case__ )
self.assertEqual(snake_case__ , snake_case__ )
# Using a specific revision to test the full commit hash.
_lowerCamelCase = cached_file(snake_case__ , snake_case__ , revision='9b8c223' )
self.assertEqual(snake_case__ , os.path.join(snake_case__ , 'snapshots' , snake_case__ , snake_case__ ) )
def _snake_case ( self : Union[str, Any] ) -> Dict:
with self.assertRaisesRegex(snake_case__ , 'is not a valid model identifier' ):
_lowerCamelCase = cached_file('tiny-random-bert' , snake_case__ )
with self.assertRaisesRegex(snake_case__ , 'is not a valid git identifier' ):
_lowerCamelCase = cached_file(snake_case__ , snake_case__ , revision='aaaa' )
with self.assertRaisesRegex(snake_case__ , 'does not appear to have a file named' ):
_lowerCamelCase = cached_file(snake_case__ , 'conf' )
def _snake_case ( self : List[str] ) -> Optional[Any]:
with self.assertRaisesRegex(snake_case__ , 'does not appear to have a file named' ):
_lowerCamelCase = cached_file(snake_case__ , 'conf' )
with open(os.path.join(snake_case__ , 'refs' , 'main' ) ) as f:
_lowerCamelCase = f.read()
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , '.no_exist' , snake_case__ , 'conf' ) ) )
_lowerCamelCase = cached_file(snake_case__ , 'conf' , _raise_exceptions_for_missing_entries=snake_case__ )
self.assertIsNone(snake_case__ )
_lowerCamelCase = cached_file(snake_case__ , 'conf' , local_files_only=snake_case__ , _raise_exceptions_for_missing_entries=snake_case__ )
self.assertIsNone(snake_case__ )
_lowerCamelCase = mock.Mock()
_lowerCamelCase = 5_0_0
_lowerCamelCase = {}
_lowerCamelCase = HTTPError
_lowerCamelCase = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=snake_case__ ) as mock_head:
_lowerCamelCase = cached_file(snake_case__ , 'conf' , _raise_exceptions_for_connection_errors=snake_case__ )
self.assertIsNone(snake_case__ )
# This check we did call the fake head request
mock_head.assert_called()
def _snake_case ( self : List[str] ) -> List[str]:
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , snake_case__ ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , snake_case__ ) )
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , snake_case__ ) )
def _snake_case ( self : Tuple ) -> Any:
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(snake_case__ , 'is not a valid model identifier' ):
get_file_from_repo('bert-base-case' , snake_case__ )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(snake_case__ , 'is not a valid git identifier' ):
get_file_from_repo('bert-base-cased' , snake_case__ , revision='ahaha' )
_lowerCamelCase = get_file_from_repo('bert-base-cased' , snake_case__ )
# The name is the cached name which is not very easy to test, so instead we load the content.
_lowerCamelCase = json.loads(open(snake_case__ , 'r' ).read() )
self.assertEqual(config['hidden_size'] , 7_6_8 )
def _snake_case ( self : Tuple ) -> int:
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase = Path(snake_case__ ) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(snake_case__ , 'a.txt' ) , str(snake_case__ ) )
self.assertIsNone(get_file_from_repo(snake_case__ , 'b.txt' ) )
| 234
|
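# Editor's note: the essence of what the caching test above exercises, assuming the
# public transformers.utils.cached_file API; the resolved path lives inside the local
# Hugging Face cache (models--<org>--<name>/snapshots/<commit>/<filename>):
from transformers.utils import cached_file

resolved = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
print(resolved)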
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 234
| 1
|
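# Editor's note: a short sketch of one re-exported collator, assuming the standard
# transformers API; the checkpoint name is an illustrative choice:
from transformers import AutoTokenizer, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorWithPadding(tokenizer=tokenizer)
batch = collator([tokenizer("short text"), tokenizer("a somewhat longer example text")])
print(batch["input_ids"].shape)  # both rows padded to the longer sequence length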
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __lowerCamelCase ( UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
snake_case__ = XLNetTokenizer
snake_case__ = XLNetTokenizerFast
snake_case__ = True
snake_case__ = True
def a ( self : str ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def a ( self : List[str] ) -> List[Any]:
lowerCAmelCase__ = "<s>"
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def a ( self : Union[str, Any] ) -> str:
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<eod>" )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 1_006 )
def a ( self : int ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def a ( self : List[str] ) -> Any:
lowerCAmelCase__ = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [285, 46, 10, 170, 382] )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def a ( self : Optional[int] ) -> Optional[Any]:
lowerCAmelCase__ = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )
def a ( self : List[Any] ) -> Optional[int]:
lowerCAmelCase__ = XLNetTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
@slow
def a ( self : Any ) -> Any:
lowerCAmelCase__ = XLNetTokenizer.from_pretrained("xlnet-base-cased" )
lowerCAmelCase__ = tokenizer.encode("sequence builders" , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tokenizer.encode("multi-sequence build" , add_special_tokens=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def a ( self : Union[str, Any] ) -> Any:
# fmt: off
lowerCAmelCase__ = {"input_ids": [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name="xlnet-base-cased" , revision="c841166438c31ec7ca9a106dee7bb312b73ae511" , )
| 61
|
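# Editor's note: unlike BERT's leading [CLS], XLNet appends its special tokens at the
# end (<sep>=4, <cls>=3), which is what the build_inputs assertions above encode.
# A minimal check, assuming the standard transformers API:
from transformers import XLNetTokenizer

tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
print(tok.encode("sequence builders")[-2:])  # [4, 3]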
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
__UpperCAmelCase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
__UpperCAmelCase = logging.getLogger()
def snake_case_ () -> Optional[Any]:
__lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument("""-f""" )
__lowerCAmelCase : Dict = parser.parse_args()
return args.f
def snake_case_ (__A : Dict , __A : List[str]="eval" ) -> int:
__lowerCAmelCase : int = os.path.join(__A , f'''{split}_results.json''' )
if os.path.exists(__A ):
with open(__A , """r""" ) as f:
return json.load(__A )
raise ValueError(f'''can\'t find {path}''' )
__UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
__lowerCAmelCase : Optional[Any] = f'''
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(lowerCAmelCase , """argv""" , lowerCAmelCase ):
run_flax_glue.main()
__lowerCAmelCase : Dict = get_results(lowerCAmelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.get_auto_remove_tmp_dir()
__lowerCAmelCase : Any = f'''
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(lowerCAmelCase , """argv""" , lowerCAmelCase ):
run_clm_flax.main()
__lowerCAmelCase : int = get_results(lowerCAmelCase )
self.assertLess(result["""eval_perplexity"""] , 1_00 )
@slow
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
__lowerCAmelCase : int = f'''
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
'''.split()
with patch.object(lowerCAmelCase , """argv""" , lowerCAmelCase ):
run_summarization_flax.main()
__lowerCAmelCase : Union[str, Any] = get_results(lowerCAmelCase , split="""test""" )
self.assertGreaterEqual(result["""test_rouge1"""] , 10 )
self.assertGreaterEqual(result["""test_rouge2"""] , 2 )
self.assertGreaterEqual(result["""test_rougeL"""] , 7 )
self.assertGreaterEqual(result["""test_rougeLsum"""] , 7 )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = self.get_auto_remove_tmp_dir()
__lowerCAmelCase : List[str] = f'''
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
'''.split()
with patch.object(lowerCAmelCase , """argv""" , lowerCAmelCase ):
run_mlm_flax.main()
__lowerCAmelCase : List[Any] = get_results(lowerCAmelCase )
self.assertLess(result["""eval_perplexity"""] , 42 )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.get_auto_remove_tmp_dir()
__lowerCAmelCase : List[str] = f'''
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
'''.split()
with patch.object(lowerCAmelCase , """argv""" , lowerCAmelCase ):
run_ta_mlm_flax.main()
__lowerCAmelCase : Union[str, Any] = get_results(lowerCAmelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.42 )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : List[Any] = 7 if get_gpu_count() > 1 else 2
__lowerCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
__lowerCAmelCase : List[Any] = f'''
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
'''.split()
with patch.object(lowerCAmelCase , """argv""" , lowerCAmelCase ):
run_flax_ner.main()
__lowerCAmelCase : Dict = get_results(lowerCAmelCase )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
self.assertGreaterEqual(result["""eval_f1"""] , 0.3 )
@slow
def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.get_auto_remove_tmp_dir()
__lowerCAmelCase : List[str] = f'''
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
'''.split()
with patch.object(lowerCAmelCase , """argv""" , lowerCAmelCase ):
run_qa.main()
__lowerCAmelCase : Union[str, Any] = get_results(lowerCAmelCase )
self.assertGreaterEqual(result["""eval_f1"""] , 30 )
self.assertGreaterEqual(result["""eval_exact"""] , 30 )
| 651
| 0
|
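# Editor's note: the tests above drive each example script by patching sys.argv and
# calling its main(); a minimal sketch of that pattern with a stand-in main():
import sys
from unittest.mock import patch


def main() -> None:
    print(sys.argv[1:])


testargs = "prog --learning_rate 1e-4 --seed 42".split()
with patch.object(sys, "argv", testargs):
    main()  # ['--learning_rate', '1e-4', '--seed', '42']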
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Circular convolution of two 1-D signals via the circulant-matrix method."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for _ in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is the second signal rotated right by the row index
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(value, 2) for value in final_signal]


if __name__ == "__main__":
    doctest.testmod()
| 652
|
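# Editor's note: cross-check of the circulant-matrix result above via the DFT identity
# circ_conv(x, h) = IFFT(FFT(x) * FFT(h)):
import numpy as np

x = np.array([2, 1, 2, -1], dtype=float)
h = np.array([1, 2, 3, 4], dtype=float)
print(np.round(np.real(np.fft.ifft(np.fft.fft(x) * np.fft.fft(h))), 2))
# [10. 10.  6. 14.] -- matches CircularConvolution().circular_convolution()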
def remove_digit(num: int) -> int:
    """Return the biggest number obtainable by removing exactly one digit of ``num``."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    # one copy of the digit string per position, each with that position removed
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
| 652
| 1
|
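# Editor's note: two illustrative calls for remove_digit above (my own examples):
print(remove_digit(152))   # 52 -> dropping the leading '1' wins
print(remove_digit(-290))  # 90 -> the sign is discarded via abs()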
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
__UpperCAmelCase = {
'bert-base-uncased': 5_12,
'bert-large-uncased': 5_12,
'bert-base-cased': 5_12,
'bert-large-cased': 5_12,
'bert-base-multilingual-uncased': 5_12,
'bert-base-multilingual-cased': 5_12,
'bert-base-chinese': 5_12,
'bert-base-german-cased': 5_12,
'bert-large-uncased-whole-word-masking': 5_12,
'bert-large-cased-whole-word-masking': 5_12,
'bert-large-uncased-whole-word-masking-finetuned-squad': 5_12,
'bert-large-cased-whole-word-masking-finetuned-squad': 5_12,
'bert-base-cased-finetuned-mrpc': 5_12,
'bert-base-german-dbmdz-cased': 5_12,
'bert-base-german-dbmdz-uncased': 5_12,
'TurkuNLP/bert-base-finnish-cased-v1': 5_12,
'TurkuNLP/bert-base-finnish-uncased-v1': 5_12,
'wietsedv/bert-base-dutch-cased': 5_12,
}
__UpperCAmelCase = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class __a ( __UpperCamelCase ):
__snake_case : Dict = VOCAB_FILES_NAMES
__snake_case : List[str] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
__snake_case : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[Any] = BertTokenizer
def __init__( self : int , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : str=None , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int="[UNK]" , UpperCAmelCase : str="[SEP]" , UpperCAmelCase : int="[PAD]" , UpperCAmelCase : Any="[CLS]" , UpperCAmelCase : Tuple="[MASK]" , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Dict=None , **UpperCAmelCase : int , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
lowerCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , UpperCAmelCase ) != tokenize_chinese_chars
):
lowerCAmelCase_ : str = getattr(UpperCAmelCase , normalizer_state.pop("""type""" ) )
lowerCAmelCase_ : List[Any] = do_lower_case
lowerCAmelCase_ : Any = strip_accents
lowerCAmelCase_ : List[str] = tokenize_chinese_chars
lowerCAmelCase_ : List[str] = normalizer_class(**UpperCAmelCase )
lowerCAmelCase_ : Dict = do_lower_case
def A ( self : int , UpperCAmelCase : int , UpperCAmelCase : int=None ):
lowerCAmelCase_ : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
lowerCAmelCase_ : Optional[int] = [self.sep_token_id]
lowerCAmelCase_ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
lowerCAmelCase_ : Optional[Any] = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
| 600
|
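# Editor's note: what create_token_type_ids_from_sequences above produces in practice,
# assuming the standard BertTokenizerFast API -- segment 0 covers [CLS] A [SEP] and
# segment 1 covers B [SEP]:
from transformers import BertTokenizerFast

tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
print(tok("first sentence", "second one")["token_type_ids"])  # 0s then 1s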
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_encoder_decoder': ['EncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_encoder_decoder'] = ['EncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_encoder_decoder'] = ['TFEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_encoder_decoder'] = ['FlaxEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 600
| 1
|
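# Editor's note: a toy sketch of the lazy-module idea used above -- heavy submodules
# are imported only when one of their exported names is first accessed. This is my
# simplification, not the transformers implementation:
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")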
"""simple docstring"""
def solution() -> int:
    """Product of digits d1 * d10 * d100 * ... * d1000000 of Champernowne's constant."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
| 442
|
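# Editor's note: sanity check for the zero-based indexing in solution() above; the
# constant's digit string starts '123456789101112...', so constant[9] (d10) is '1':
digits = "".join(str(i) for i in range(1, 16))
print(digits[:12])  # 123456789101
print(digits[0], digits[9])  # 1 1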
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve dy/dx = ode_func(x, y) from (x0, y0) to x_end with the explicit Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 442
| 1
|
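# Editor's note: example use of the solver above on dy/dx = y, y(0) = 1, whose exact
# solution is e^x; explicit Euler is first-order, so halving the step roughly halves
# the error:
approx = explicit_euler(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
print(approx[-1])  # ~2.7048 versus e ~ 2.7183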
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix · x = vector by Gaussian elimination with partial pivoting."""
    size = len(matrix)
    augmented = [[0 for _ in range(size + 1)] for _ in range(size)]

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return the polynomial of minimal degree through (1, y1), (2, y2), ..."""
    size = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) = 1 - n + n^2 - ... + n^10 from the problem."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum of the first incorrect terms (FITs) of the optimal polynomials OP(k, n)."""
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
| 528
|
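# Editor's note: the classic cubic example from the problem statement, using the
# helpers above -- for u(n) = n^3, the degree-0 fit through u(1) = 1 predicts u(2) = 1,
# so its first incorrect term (FIT) is 1:
fit1 = interpolate([1])  # constant polynomial through (1, 1)
print(fit1(2))  # 1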
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.emoji_file, 'w') as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        output_text = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = 'こんにちは、世界。 こんばんは、㔺界。'
        expected_token = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        expected_text = 'こんにちは、、、、世界。こんばんは、、、、世界。'
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        expected_text = 'こんにちは、世界。こんばんは、世界。😀'
        tokens_1 = tokenizer.encode(prefix_text + input_text)
        tokens_2 = tokenizer.encode('', prefix_text=prefix_text + input_text)
        tokens_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        text_1 = tokenizer.decode(tokens_1)
        text_2 = tokenizer.decode(tokens_2)
        text_3 = tokenizer.decode(tokens_3)
        self.assertEqual(text_1, expected_text)
        self.assertEqual(text_2, expected_text)
        self.assertEqual(text_3, expected_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        # Testing tokenization
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_id_2 = tokenizer('', prefix_text=prefix_text + input_text).token_type_ids
        type_id_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_id_1, expected_mask_1)
        self.assertListEqual(type_id_2, expected_mask_2)
        self.assertListEqual(type_id_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        x_token_1 = tokenizer.encode('あンいワ')
        x_token_2 = tokenizer.encode('', prefix_text='あンいワ')
        x_token_3 = tokenizer.encode('いワ', prefix_text='あン')
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_3[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        input_pairs = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
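
# Hedged usage sketch of the prefix-LM behaviour exercised by the @slow tests
# above (not part of the test suite; requires downloading the
# Tanrei/GPTSAN-japanese checkpoint):
def _demo_prefix_text():
    tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
    ids = tokenizer.encode("いワ", prefix_text="あン")
    text = tokenizer.decode(ids)
    # token_type_ids mark the prefix segment with 1 and the input segment with 0
    type_ids = tokenizer("いワ", prefix_text="あン").token_type_ids
    return text, type_ids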
| 242
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
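
# A minimal sketch of the lazy-import idea used above (illustrative only, not
# the actual transformers `_LazyModule`): attribute access triggers the import
# of the submodule that defines the requested name.
import importlib
import types


class _LazyDemo(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)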
| 526
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class ErnieMTokenizer(PreTrainedTokenizer):
    model_input_names: List[str] = ["input_ids"]

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, vocab_file=vocab_file, encoding=encoding, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        if text is None:
            return None

        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory, filename_prefix=None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
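
# Worked example for the special-token layout implemented above: for a pair
# (A, B), `build_inputs_with_special_tokens` produces [CLS] A [SEP] [SEP] B [SEP],
# and `create_token_type_ids_from_sequences` marks the B segment (plus its
# trailing separators) with 1. With len(A) == 2 and len(B) == 1:
#
#   token_type_ids = [0] * (2 + 1) + [1] * (1 + 3)  # -> [0, 0, 0, 1, 1, 1, 1]
#
# i.e. seven positions for the seven tokens [CLS] a1 a2 [SEP] [SEP] b1 [SEP].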
| 526
| 1
|
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)


class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}")

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizerBase", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default") -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
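
# Hedged usage sketch for the composite config above (model types are
# illustrative; any registered encoder/decoder model type works the same way):
if __name__ == "__main__":
    encoder_cfg = AutoConfig.for_model("vit")
    decoder_cfg = AutoConfig.for_model("gpt2")
    cfg = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
    assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention
    # round-trips through to_dict()/__init__ thanks to the "encoder"/"decoder" keys
    VisionEncoderDecoderConfig(**cfg.to_dict())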
| 223
|
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
SCREAMING_SNAKE_CASE__ : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
        end = tokenizer.vocab_size
        vocab_keys = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(vocab_keys, ["__java__", "__python__", "__en_XX__", "<mask>"])
        text = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(text).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False), text, )
    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
SCREAMING_SNAKE_CASE__ : int = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
        end = tokenizer.vocab_size
        vocab_keys = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            vocab_keys, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"])
        text = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(text).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False), text, )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX")
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)
    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_max_length(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])
    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java")

        self.assertEqual(
            nested_simplify(inputs), {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            }, )
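
# Hedged usage sketch for the translation workflow tested above (checkpoint
# name taken from this test class; downloading the model files is required):
#
#   tokenizer = PLBartTokenizer.from_pretrained(
#       "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
#   )
#   batch = tokenizer(["def f(a,b):NEW_LINE_INDENTreturn a+b"], return_tensors="pt")
#   # input_ids end with [eos, __python__]; target/decoder inputs use __en_XX__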
| 223
| 1
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging

logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')


def load_weights(checkpoint, hf_model, config):
    # The left-hand-side targets below follow SpeechT5HifiGan's layer names
    # (conv_pre, upsampler, resblocks, conv_post), inferred from the checkpoint
    # keys, since the originals were destroyed in this copy of the script.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'upsamples.{i}.1.weight_g']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'upsamples.{i}.1.weight_v']
        hf_model.upsampler[i].bias.data = checkpoint[f'upsamples.{i}.1.bias']

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']

    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
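
# Equivalent CLI invocation of the script above (the script filename and all
# paths are placeholders for illustration; flags match the argparse setup):
#
#   python convert_hifigan.py \
#       --checkpoint_path hifigan/checkpoint.pkl \
#       --stats_path hifigan/stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan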
| 714
|
"""simple docstring"""
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
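
# A minimal standalone sketch of the sampling loop that `full_loop` above
# drives (the zero residual is a stand-in for a real denoising model; shapes
# and settings are illustrative):
def _demo_ddim_parallel_loop():
    scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(residual, t, sample, 0.0).prev_sample
    return sample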
| 628
| 0
|
"""simple docstring"""
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """
    Count, for every perimeter up to ``max_perimeter``, how many right-angle
    triangles with integral side lengths have that perimeter.
    """
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(max_perimeter: int = 1_000) -> int:
    """Return the perimeter p <= max_perimeter with the most solutions."""
    triplets = pythagorean_triple(max_perimeter)
    return triplets.most_common(1)[0][0]
if __name__ == "__main__":
print(F'Perimeter {solution()} has maximum solutions')
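    # Hand-checkable example: p = 120 has exactly three solutions,
    # {20, 48, 52}, {24, 45, 51} and {30, 40, 50}, so the counter above
    # should report 3 for it.
    assert pythagorean_triple(120)[120] == 3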
| 621
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file')
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
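
# Hedged usage sketch for the processor above (checkpoint name illustrative;
# any InstructBLIP checkpoint that bundles a qformer_tokenizer works):
#
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
#   # `inputs` also carries qformer_input_ids / qformer_attention_mask for the Q-Former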
| 135
| 0
|
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_with_bytefallback.model')
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token='<unk>', bos_token='<unk>', pad_token='<unk>')
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = 'This is a test'
        output_text = 'This is a test'
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 2000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        # fmt: off
        self.assertListEqual(
            tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'])
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ['This is a test', 'I was born in 92000, and this is falsé.']
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
    def test_tokenizer_integration(self):
        sequences = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
        expected_encoding = {'input_ids': [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name='AI-Sweden/gpt-sw3-126m', sequences=sequences, )
| 710
|
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    # Target attributes follow the Wav2Vec2 head layouts; they were inferred
    # from the checkpoint keys since the originals were destroyed in this copy.
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['projector.weight']
    model.projector.bias.data = downstream_dict['projector.bias']
    model.classifier.weight.data = downstream_dict['model.post_net.linear.weight']
    model.classifier.bias.data = downstream_dict['model.post_net.linear.bias']
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict['model.linear.weight']
    model.classifier.bias.data = downstream_dict['model.linear.bias']
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['connector.weight']
    model.projector.bias.data = downstream_dict['connector.bias']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]

    model.feature_extractor.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
    model.feature_extractor.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
    model.classifier.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
    model.classifier.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
    model.objective.weight.data = downstream_dict['objective.W']
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')

    downstream_dict = checkpoint['Downstream']

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)

    arch = hf_config.architectures[0]
    if arch.endswith('ForSequenceClassification'):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForAudioFrameClassification'):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForXVector'):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['Featurizer']['weights']

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
    )
    parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
    parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
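
# Equivalent CLI invocation of the conversion above (the script filename and
# all paths/names are placeholders; flags match the argparse setup):
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model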
| 236
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates, )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = TFOPTModel(config=_UpperCamelCase )
_UpperCAmelCase = inputs_dict['''input_ids''']
_UpperCAmelCase = input_ids[:1, :]
_UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
_UpperCAmelCase = 1
# first forward pass
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase )
_UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase )[0]
_UpperCAmelCase = model(_UpperCamelCase , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
_UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCamelCase , _UpperCamelCase , rtol=1e-3 )
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Build the word-embedding weights if they don't exist yet,
                # then retry fetching the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embedding size matches the desired size
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 32
|
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Convert a plain-text message into Morse code, one space between letters."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Convert space-separated Morse code back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
| 32
| 1
|
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value: int | None = None) -> None:
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
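# Illustrative only: a leaf node's repr reads 'value: priority'; the priority
# comes from random() at construction, so the number below varies run to run.
#
#   >>> Node(1)  # doctest: +SKIP
#   '1: 0.51362'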
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # a None tree is split into two Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # if one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
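# A small usage sketch (values arbitrary): build a treap, walk it in order,
# then erase a key. inorder() is defined just below and prints with "," ends.
#
#   root = None
#   for value in (5, 3, 8):
#       root = insert(root, value)
#   inorder(root)        # prints 3,5,8,
#   root = erase(root, 5)
#   inorder(root)        # prints 3,8,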
def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("goodbye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 719
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # fill in each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # rotate coordinates into the kernel's orientation
            _x = cos_theta * px + sin_theta * py
            _y = -sin_theta * px + cos_theta * py

            # fill kernel: Gaussian envelope times a cosine carrier
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
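# Example (parameter values are arbitrary): a kernel tuned to structures at
# roughly 30 degrees. ksize=9 is already odd, so the result is 9x9.
#
#   kernel = gabor_filter_kernel(ksize=9, sigma=8, theta=30, lambd=10, gamma=0, psi=0)
#   kernel.shape  # (9, 9)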
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
lowerCamelCase_ = imread('''../image_data/lena.jpg''')
# turn image in gray scale value
lowerCamelCase_ = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
lowerCamelCase_ = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
lowerCamelCase_ = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
lowerCamelCase_ = out / out.max() * 255
lowerCamelCase_ = out.astype(np.uinta)
imshow('''Original''', gray)
imshow('''Gabor filter with 20x20 mask and 6 directions''', out)
waitKey(0)
| 463
| 0
|