code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from collections import deque
class __UpperCamelCase :
def __init__( self : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : int ) -> None:
"""simple docstring"""
__lowercase = process_name # process name
__lowercase = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__lowercase = arrival_time
__lowercase = burst_time # remaining burst time
__lowercase = 0 # total time of the process wait in ready queue
__lowercase = 0 # time from arrival time to completion time
class __UpperCamelCase :
def __init__( self : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : list[int] , _lowerCAmelCase : deque[Process] , _lowerCAmelCase : int , ) -> None:
"""simple docstring"""
__lowercase = number_of_queues
# time slice of queues that round robin algorithm applied
__lowercase = time_slices
# unfinished process is in this ready_queue
__lowercase = queue
# current time
__lowercase = current_time
# finished process is in this sequence queue
__lowercase = deque()
def _a ( self : List[Any] ) -> list[str]:
"""simple docstring"""
__lowercase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def _a ( self : List[str] , _lowerCAmelCase : list[Process] ) -> list[int]:
"""simple docstring"""
__lowercase = []
for i in range(len(_lowerCAmelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def _a ( self : List[str] , _lowerCAmelCase : list[Process] ) -> list[int]:
"""simple docstring"""
__lowercase = []
for i in range(len(_lowerCAmelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def _a ( self : Any , _lowerCAmelCase : list[Process] ) -> list[int]:
"""simple docstring"""
__lowercase = []
for i in range(len(_lowerCAmelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def _a ( self : Optional[int] , _lowerCAmelCase : deque[Process] ) -> list[int]:
"""simple docstring"""
return [q.burst_time for q in queue]
def _a ( self : Dict , _lowerCAmelCase : Process ) -> int:
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def _a ( self : Tuple , _lowerCAmelCase : deque[Process] ) -> deque[Process]:
"""simple docstring"""
__lowercase = deque() # sequence deque of finished process
while len(_lowerCAmelCase ) != 0:
__lowercase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_lowerCAmelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__lowercase = 0
# set the process's turnaround time because it is finished
__lowercase = self.current_time - cp.arrival_time
# set the completion time
__lowercase = self.current_time
# add the process to queue that has finished queue
finished.append(_lowerCAmelCase )
self.finish_queue.extend(_lowerCAmelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def _a ( self : int , _lowerCAmelCase : deque[Process] , _lowerCAmelCase : int ) -> tuple[deque[Process], deque[Process]]:
"""simple docstring"""
__lowercase = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_lowerCAmelCase ) ):
__lowercase = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_lowerCAmelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__lowercase = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_lowerCAmelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__lowercase = 0
# set the finish time
__lowercase = self.current_time
# update the process' turnaround time because it is finished
__lowercase = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_lowerCAmelCase )
self.finish_queue.extend(_lowerCAmelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def _a ( self : Any ) -> deque[Process]:
"""simple docstring"""
for i in range(self.number_of_queues - 1 ):
__lowercase , __lowercase = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
__UpperCamelCase : Optional[int] = Process("""P1""", 0, 53)
__UpperCamelCase : List[Any] = Process("""P2""", 0, 17)
__UpperCamelCase : Tuple = Process("""P3""", 0, 68)
__UpperCamelCase : Union[str, Any] = Process("""P4""", 0, 24)
__UpperCamelCase : Union[str, Any] = 3
__UpperCamelCase : Union[str, Any] = [17, 25]
__UpperCamelCase : Dict = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
__UpperCamelCase : int = Process("""P1""", 0, 53)
__UpperCamelCase : Union[str, Any] = Process("""P2""", 0, 17)
__UpperCamelCase : Any = Process("""P3""", 0, 68)
__UpperCamelCase : Dict = Process("""P4""", 0, 24)
__UpperCamelCase : int = 3
__UpperCamelCase : Optional[int] = [17, 25]
__UpperCamelCase : Optional[Any] = deque([Pa, Pa, Pa, Pa])
__UpperCamelCase : Dict = MLFQ(number_of_queues, time_slices, queue, 0)
__UpperCamelCase : List[Any] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F'''waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print completion times of processes(P1, P2, P3, P4)
print(
F'''completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F'''turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}'''
)
# print sequence of finished processes
print(
F'''sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}'''
)
| 80 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Any = tempfile.mkdtemp()
# fmt: off
_snake_case : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_snake_case : Dict = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
_snake_case : Dict = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_snake_case : Optional[int] = {"unk_token": "<unk>"}
_snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
_snake_case : Any = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_snake_case : Optional[Any] = os.path.join(self.tmpdirname , lowercase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowercase_ , lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_snake_case : Union[str, Any] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self ):
_snake_case : Tuple = self.get_tokenizer()
_snake_case : Any = self.get_rust_tokenizer()
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Any = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_slow.save_pretrained(self.tmpdirname )
_snake_case : Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
_snake_case : List[Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_fast.save_pretrained(self.tmpdirname )
_snake_case : Optional[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase_ )
self.assertIsInstance(processor_fast.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : List[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_snake_case : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
_snake_case : Tuple = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.get_image_processor()
_snake_case : Any = self.get_tokenizer()
_snake_case : int = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = self.prepare_image_inputs()
_snake_case : Optional[Any] = image_processor(lowercase_ , return_tensors="np" )
_snake_case : str = processor(images=lowercase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Any = self.get_tokenizer()
_snake_case : Dict = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : List[str] = "lower newer"
_snake_case : int = processor(text=lowercase_ )
_snake_case : str = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self ):
_snake_case : List[Any] = self.get_image_processor()
_snake_case : int = self.get_tokenizer()
_snake_case : Tuple = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : List[Any] = "lower newer"
_snake_case : int = self.prepare_image_inputs()
_snake_case : Dict = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_image_processor()
_snake_case : List[str] = self.get_tokenizer()
_snake_case : Union[str, Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = self.prepare_image_inputs()
_snake_case : Dict = self.prepare_image_inputs()
_snake_case : List[Any] = processor(images=lowercase_ , visual_prompt=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_image_processor()
_snake_case : List[Any] = self.get_tokenizer()
_snake_case : str = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_snake_case : Any = processor.batch_decode(lowercase_ )
_snake_case : Any = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ ) | 670 | 0 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_snake_case : Optional[Any] = False
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : int , lowerCamelCase : Optional[Any]=32 ) -> Dict:
set_seed(0 )
__snake_case : int = UNetaDModel(sample_size=lowerCamelCase , in_channels=3 , out_channels=3 )
__snake_case : List[Any] = torch.optim.SGD(model.parameters() , lr=0.00_01 )
return model, optimizer
@slow
def __snake_case ( self : str ) -> List[Any]:
__snake_case : int = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
__snake_case : Optional[Any] = DDPMScheduler(
num_train_timesteps=1000 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule="linear" , clip_sample=lowerCamelCase , )
__snake_case : Tuple = DDIMScheduler(
num_train_timesteps=1000 , beta_start=0.00_01 , beta_end=0.02 , beta_schedule="linear" , clip_sample=lowerCamelCase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
__snake_case : Tuple = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowerCamelCase ) for _ in range(4 )]
__snake_case : str = [torch.randn((4, 3, 32, 32) ).to(lowerCamelCase ) for _ in range(4 )]
__snake_case : List[Any] = [torch.randint(0 , 1000 , (4,) ).long().to(lowerCamelCase ) for _ in range(4 )]
# train with a DDPM scheduler
__snake_case , __snake_case : str = self.get_model_optimizer(resolution=32 )
model.train().to(lowerCamelCase )
for i in range(4 ):
optimizer.zero_grad()
__snake_case : Optional[Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
__snake_case : List[str] = model(lowerCamelCase , timesteps[i] ).sample
__snake_case : Tuple = torch.nn.functional.mse_loss(lowerCamelCase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
__snake_case , __snake_case : Optional[int] = self.get_model_optimizer(resolution=32 )
model.train().to(lowerCamelCase )
for i in range(4 ):
optimizer.zero_grad()
__snake_case : str = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
__snake_case : Any = model(lowerCamelCase , timesteps[i] ).sample
__snake_case : Tuple = torch.nn.functional.mse_loss(lowerCamelCase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-5 ) )
self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-5 ) )
| 81 | from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(__lowercase ):
return ext
raise Exception(
F"""Unable to determine file format from file extension {path}. """
F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
_snake_case : int = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
_snake_case : List[Any] = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
_snake_case : Optional[int] = PipelineDataFormat.from_str(
format=__lowercase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(__lowercase , __lowercase )
class lowercase_ ( __snake_case ):
def __init__( self , lowercase_ , lowercase_ ):
_snake_case : str = nlp
_snake_case : str = reader
@staticmethod
def UpperCamelCase ( lowercase_ ):
_snake_case : Dict = parser.add_parser("run" , help="Run a pipeline through the CLI" )
run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
run_parser.add_argument("--input" , type=lowercase_ , help="Path to the file to use for inference" )
run_parser.add_argument("--output" , type=lowercase_ , help="Path to the file that will be used post to write results." )
run_parser.add_argument("--model" , type=lowercase_ , help="Name or path to the model to instantiate." )
run_parser.add_argument("--config" , type=lowercase_ , help="Name or path to the model's config to instantiate." )
run_parser.add_argument(
"--tokenizer" , type=lowercase_ , help="Name of the tokenizer to use. (default: same as the model name)" )
run_parser.add_argument(
"--column" , type=lowercase_ , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
run_parser.add_argument(
"--format" , type=lowercase_ , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
run_parser.add_argument(
"--device" , type=lowercase_ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
run_parser.set_defaults(func=lowercase_ )
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Tuple = self._nlp, []
for entry in self._reader:
_snake_case : Optional[Any] = nlp(**lowercase_ ) if self._reader.is_multi_columns else nlp(lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
outputs.append(lowercase_ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
_snake_case : str = self._reader.save_binary(lowercase_ )
logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(lowercase_ ) | 670 | 0 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
lowerCamelCase = pd.read_csv("""sample_data.csv""", header=None)
lowerCamelCase = df.shape[:1][0]
# If you're using some other dataset input the target column
lowerCamelCase = df.iloc[:, 1:2]
lowerCamelCase = actual_data.values.reshape(len_data, 1)
lowerCamelCase = MinMaxScaler().fit_transform(actual_data)
lowerCamelCase = 10
lowerCamelCase = 5
lowerCamelCase = 20
lowerCamelCase = len_data - periods * look_back
lowerCamelCase = actual_data[:division]
lowerCamelCase = actual_data[division - look_back :]
lowerCamelCase , lowerCamelCase = [], []
lowerCamelCase , lowerCamelCase = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
lowerCamelCase = np.array(train_x)
lowerCamelCase = np.array(test_x)
lowerCamelCase = np.array([list(i.ravel()) for i in train_y])
lowerCamelCase = np.array([list(i.ravel()) for i in test_y])
lowerCamelCase = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="""mean_squared_error""", optimizer="""adam""")
lowerCamelCase = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
lowerCamelCase = model.predict(x_test)
| 82 | import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
def __init__( self , lowercase_ ):
super().__init__()
_snake_case : List[str] = nn.ModuleList(lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = True , ):
for i, (image, scale, controlnet) in enumerate(zip(lowercase_ , lowercase_ , self.nets ) ):
_snake_case ,_snake_case : Optional[int] = controlnet(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
# merge samples
if i == 0:
_snake_case ,_snake_case : Tuple = down_samples, mid_sample
else:
_snake_case : Tuple = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase_ , lowercase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCamelCase ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , ):
_snake_case : Tuple = 0
_snake_case : Dict = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase_ , is_main_process=lowercase_ , save_function=lowercase_ , safe_serialization=lowercase_ , variant=lowercase_ , )
idx += 1
_snake_case : int = model_path_to_save + f"""_{idx}"""
@classmethod
def UpperCamelCase ( cls , lowercase_ , **lowercase_ ):
_snake_case : List[str] = 0
_snake_case : Optional[Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_snake_case : Optional[Any] = pretrained_model_path
while os.path.isdir(lowercase_ ):
_snake_case : int = ControlNetModel.from_pretrained(lowercase_ , **lowercase_ )
controlnets.append(lowercase_ )
idx += 1
_snake_case : str = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(lowercase_ )} controlnets loaded from {pretrained_model_path}.""" )
if len(lowercase_ ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(lowercase_ )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(lowercase_ ) | 670 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __snake_case ( _lowercase , unittest.TestCase):
snake_case__ : List[str] = CTRLTokenizer
snake_case__ : List[str] = False
snake_case__ : Any = False
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCamelCase : str = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
_lowerCamelCase : Dict = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
_lowerCamelCase : Optional[Any] = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
_lowerCamelCase : Any = {'''unk_token''': '''<unk>'''}
_lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , **__lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : str ):
"""simple docstring"""
_lowerCamelCase : Dict = '''adapt react readapt apt'''
_lowerCamelCase : Dict = '''adapt react readapt apt'''
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : str = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCamelCase : List[Any] = '''adapt react readapt apt'''
_lowerCamelCase : Union[str, Any] = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
_lowerCamelCase : Union[str, Any] = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Optional[Any] = tokens + [tokenizer.unk_token]
_lowerCamelCase : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
| 83 | import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['image_processor', 'tokenizer']
_lowerCamelCase = 'CLIPImageProcessor'
_lowerCamelCase = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
def __init__( self , lowercase_=None , lowercase_=None , **lowercase_ ):
_snake_case : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowercase_ , )
_snake_case : Dict = kwargs.pop("feature_extractor" )
_snake_case : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowercase_ , lowercase_ )
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_snake_case : str = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if images is not None:
_snake_case : List[str] = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
if text is not None and images is not None:
_snake_case : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def UpperCamelCase ( self ):
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 670 | 0 |
from __future__ import annotations
class A_ :
'''simple docstring'''
def __init__( self , snake_case ):
lowercase = order
# a_{0} ... a_{k}
lowercase = [1.0] + [0.0] * order
# b_{0} ... b_{k}
lowercase = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
lowercase = [0.0] * self.order
# y[n-1] ... y[n-k]
lowercase = [0.0] * self.order
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
if len(snake_case ) < self.order:
lowercase = [1.0, *a_coeffs]
if len(snake_case ) != self.order + 1:
lowercase = (
F'''Expected a_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(snake_case )}'''
)
raise ValueError(snake_case )
if len(snake_case ) != self.order + 1:
lowercase = (
F'''Expected b_coeffs to have {self.order + 1} elements '''
F'''for {self.order}-order filter, got {len(snake_case )}'''
)
raise ValueError(snake_case )
lowercase = a_coeffs
lowercase = b_coeffs
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
lowercase = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
lowercase = self.input_history[:-1]
lowercase = self.output_history[:-1]
lowercase = sample
lowercase = result
return result
| 84 | from __future__ import annotations
def snake_case (__lowercase , __lowercase , __lowercase ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | 0 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
def _a(lowercase__):
    """Map a batch of images from [-1, 1] to [0, 1] and convert to PIL images.

    ``lowercase__`` is presumably a torch tensor of shape (batch, C, H, W) —
    the ``.clamp``/``.cpu``/``.permute`` chain requires one; TODO confirm.
    """
    # BUG FIX: the body referred to ``images`` while the parameter was renamed
    # to ``lowercase__``, and each step was bound to a throw-away local.
    images = (lowercase__ / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    # NOTE(review): ``numpy_to_pil`` does not exist under that name in this
    # file (the helper below was also renamed to ``_a``); re-point this call
    # before relying on it.
    images = numpy_to_pil(images)
    return images
def _a(lowercase__):
    """Convert a numpy image batch in [0, 1] to a list of PIL images.

    Accepts either a single image (H, W, C) or a batch (N, H, W, C);
    single-channel images are produced in mode "L".

    NOTE(review): this definition shadows the tensor post-processing helper
    above, which an automated rename also called ``_a``.
    """
    # BUG FIX: the body referred to ``images`` while the parameter was
    # renamed to ``lowercase__``.
    images = lowercase__
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('uint8')
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode='L') for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 85 | import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def snake_case(*__lowercase):
    """Drop references to the given objects and empty the accelerator cache.

    Every positional argument slot is replaced with ``None`` (callers rebind
    their own names from the returned list), the garbage collector runs, and
    the cache of whichever accelerator backend is available is emptied.

    Returns:
        A list of ``None`` placeholders, one per argument.
    """
    # BUG FIX: the previous revision called ``isinstance(objects, objects)``,
    # which raises TypeError (arg 2 must be a type).  The intent is to turn
    # the argument tuple into a mutable list.
    objects = list(__lowercase)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def snake_case(__lowercase) -> bool:
    """Return True if ``__lowercase`` looks like an out-of-memory RuntimeError.

    Recognises CUDA OOM, the cuDNN "not supported" error that follows OOM,
    and CPU allocator failures — each carried as a single-message
    RuntimeError.
    """
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    # BUG FIX: the previous revision tested ``isinstance(exception, exception)``
    # (TypeError); the check is meant to accept RuntimeError instances that
    # carry exactly one message argument.
    if isinstance(__lowercase, RuntimeError) and len(__lowercase.args) == 1:
        return any(err in __lowercase.args[0] for err in _statements)
    return False
def snake_case(function=None, starting_batch_size=128):
    """Decorator that retries ``function`` with half the batch size on OOM.

    ``function`` must accept ``batch_size`` as its first argument; the
    wrapper injects it, halving the value each time an out-of-memory error
    escapes, until the call succeeds or the batch size reaches zero.

    Args:
        function: the callable to wrap, or None when the decorator is applied
            with keyword arguments only.
        starting_batch_size: the batch size tried first.

    Raises:
        RuntimeError: when no batch size down to 1 fits in memory.
        TypeError: when the caller also passes a batch size explicitly.
    """
    # BUG FIX: the previous revision declared two identical parameter names,
    # built the partial out of the wrong names and used an always-true guard
    # (``len(x) < len(x) + 1``); names are restored from the body's usage.
    if function is None:
        # Used as ``@snake_case(starting_batch_size=...)``: return a partial
        # of this decorator that still awaits the function.
        return functools.partial(snake_case, starting_batch_size=starting_batch_size)
    batch_size = starting_batch_size

    def _should_reduce_batch_size(exception):
        # Single-message RuntimeErrors describing an OOM condition.
        # (Inlined because the module-level checker's name was mangled.)
        statements = [
            "CUDA out of memory.",  # CUDA OOM
            "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
            "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
        ]
        if isinstance(exception, RuntimeError) and len(exception.args) == 1:
            return any(err in exception.args[0] for err in statements)
        return False

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: the wrapped function must accept
        # ``batch_size`` plus every explicitly-passed positional argument.
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`")
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if _should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : int ):
"""simple docstring"""
return abs(__UpperCamelCase ) if a == 0 else greatest_common_divisor(b % a ,__UpperCamelCase )
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : int ):
"""simple docstring"""
while y: # --> when y=0 then loop will terminate and return x as final GCD.
A_ , A_ = y, x % y
return abs(__UpperCamelCase )
def __snake_case():
    """Interactive driver: read two comma-separated integers and print both GCDs.

    NOTE(review): an automated rename left this body internally inconsistent —
    both parsed integers are bound to the same name ``A_`` (the second
    overwrites the first), and the f-strings/calls reference ``nums``,
    ``num_a``, ``greatest_common_divisor`` and ``gcd_by_iterative``, none of
    which exist under those names in this file.  Code left byte-identical;
    the call sites need re-pointing at the two ``__snake_case`` helpers above
    before this can run.
    """
    try:
        A_ = input("Enter two integers separated by comma (,): " ).split("," )
        A_ = int(nums[0] )
        A_ = int(nums[1] )
        print(
            f'''greatest_common_divisor({num_a}, {num_a}) = '''
            f'''{greatest_common_divisor(__UpperCamelCase ,__UpperCamelCase )}''' )
        print(f'''By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(__UpperCamelCase ,__UpperCamelCase )}''' )
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input" )


if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined in this file; the driver above was
    # renamed to ``__snake_case``.
    main()
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
__SCREAMING_SNAKE_CASE : int = {value: key for key, value in encode_dict.items()}
def snake_case(__lowercase) -> str:
    """Encode a word with the Baconian cipher (letters and spaces only).

    Raises:
        Exception: if the input contains anything but letters and spaces.
    """
    # BUG FIX: the body iterated over the undefined name ``word``; the
    # parameter itself is used instead.
    encoded = ""
    for letter in __lowercase.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def snake_case(__lowercase) -> str:
    """Decode a Baconian-cipher string: 5-char A/B groups, words split by spaces.

    Raises:
        Exception: if the input contains characters other than 'A', 'B', ' '.
    """
    if set(__lowercase) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    # BUG FIX: the loop read the undefined name ``coded`` and never advanced
    # ``word`` (the 5-character slice was bound to a throw-away local).
    for word in __lowercase.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
import math
def SCREAMING_SNAKE_CASE(lowercase_=100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum
    of the squares of the first ``lowercase_`` natural numbers."""
    # BUG FIX: the body read the undefined name ``n`` (the parameter was
    # mangled) and bound both intermediate values to a throw-away local while
    # the return statement used the real names.
    sum_of_squares = sum(i * i for i in range(1, lowercase_ + 1))
    square_of_sum = int(math.pow(sum(range(1, lowercase_ + 1)), 2))
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    # BUG FIX: the guard previously called the undefined name ``solution``.
    print(f"{SCREAMING_SNAKE_CASE() = }")
| 87 | import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase_ ( unittest.TestCase ):
    """Slow Flax Stable Diffusion 2 integration tests.

    NOTE(review): an automated rename left all three methods called
    ``UpperCamelCase`` (later definitions shadow earlier ones in the class
    namespace), collapsed every local into ``_snake_case`` while the bodies
    still read the original names (``sd_pipe``, ``prompt``, ``num_samples``,
    ``images``, ``output_slice``, ...), produced invalid annotated
    tuple-unpack targets (``_snake_case ,_snake_case : T = ...`` is a
    SyntaxError) and mangled the dtype name ``jnp.bfloataa``.  Code left
    byte-identical for manual restoration.
    """

    def UpperCamelCase ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def UpperCamelCase ( self ):
        # End-to-end text-to-image run with the default scheduler.
        _snake_case ,_snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
        _snake_case : List[Any] = "A painting of a squirrel eating a burger"
        _snake_case : Union[str, Any] = jax.device_count()
        _snake_case : List[Any] = num_samples * [prompt]
        _snake_case : Tuple = sd_pipe.prepare_inputs(lowercase_ )
        _snake_case : str = replicate(lowercase_ )
        _snake_case : Dict = shard(lowercase_ )
        _snake_case : List[Any] = jax.random.PRNGKey(0 )
        _snake_case : List[Any] = jax.random.split(lowercase_ , jax.device_count() )
        _snake_case : Tuple = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        _snake_case : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        _snake_case : str = images[0, 253:256, 253:256, -1]
        _snake_case : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        _snake_case : Optional[Any] = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2

    def UpperCamelCase ( self ):
        # Same run but with the DPM-Solver multistep scheduler swapped in.
        _snake_case : Optional[Any] = "stabilityai/stable-diffusion-2"
        _snake_case ,_snake_case : List[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase_ , subfolder="scheduler" )
        _snake_case ,_snake_case : int = FlaxStableDiffusionPipeline.from_pretrained(
            lowercase_ , scheduler=lowercase_ , revision="bf16" , dtype=jnp.bfloataa , )
        _snake_case : str = scheduler_params
        _snake_case : Dict = "A painting of a squirrel eating a burger"
        _snake_case : Dict = jax.device_count()
        _snake_case : Optional[int] = num_samples * [prompt]
        _snake_case : List[str] = sd_pipe.prepare_inputs(lowercase_ )
        _snake_case : Optional[int] = replicate(lowercase_ )
        _snake_case : Union[str, Any] = shard(lowercase_ )
        _snake_case : List[Any] = jax.random.PRNGKey(0 )
        _snake_case : Union[str, Any] = jax.random.split(lowercase_ , jax.device_count() )
        _snake_case : str = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        _snake_case : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        _snake_case : List[str] = images[0, 253:256, 253:256, -1]
        _snake_case : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        _snake_case : Dict = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
"""simple docstring"""
import math
def _snake_case ( __snake_case : float , __snake_case : float ):
"""simple docstring"""
if (
not isinstance(__snake_case , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def _snake_case ( __snake_case : float , __snake_case : float ):
"""simple docstring"""
if (
not isinstance(__snake_case , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
from manim import *


class lowercase_ ( __snake_case ):
    """Manim scene animating a sharded-checkpoint load (CPU/GPU/model boxes).

    NOTE(review): an automated rename collapsed every local into
    ``_snake_case`` and, more destructively, replaced the Manim direction /
    colour / flag arguments (presumably DOWN, RIGHT, LEFT, YELLOW, False, ...)
    with the undefined name ``lowercase_``; the bodies also read the original
    locals (``mem``, ``cpu``, ``gpu``, ``model``, ``cpu_targs``, ``fill``,
    ``target``, ``key_text``, ``step_a``, ...).  Code left byte-identical for
    manual restoration; it cannot run as written.
    """

    def UpperCamelCase ( self ):
        # Build the CPU box: two stacked rows of six memory cells plus label.
        _snake_case : Tuple = Rectangle(height=0.5 , width=0.5 )
        _snake_case : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        _snake_case : List[str] = [mem.copy() for i in range(6 )]
        _snake_case : Any = [mem.copy() for i in range(6 )]
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : str = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : int = Text("CPU" , font_size=24 )
        _snake_case : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(lowercase_ )
        # GPU box: four memory cells plus label.
        _snake_case : int = [mem.copy() for i in range(4 )]
        _snake_case : Dict = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : str = Text("GPU" , font_size=24 )
        _snake_case : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        gpu.move_to([-1, -1, 0] )
        self.add(lowercase_ )
        # Model box: six memory cells plus label.
        _snake_case : Any = [mem.copy() for i in range(6 )]
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Dict = Text("Model" , font_size=24 )
        _snake_case : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        model.move_to([3, -1.0, 0] )
        self.add(lowercase_ )
        # Small "empty weight" markers positioned over the CPU cells.
        _snake_case : str = []
        for i, rect in enumerate(lowercase_ ):
            rect.set_stroke(lowercase_ )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            _snake_case : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
            self.add(lowercase_ )
            cpu_targs.append(lowercase_ )
        # Loaded-checkpoint box and legend key.
        _snake_case : List[Any] = [mem.copy() for i in range(6 )]
        _snake_case : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Optional[Any] = Text("Loaded Checkpoint" , font_size=24 )
        _snake_case : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        _snake_case : Optional[int] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        _snake_case : Optional[Any] = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(lowercase_ , lowercase_ )
        _snake_case : Union[str, Any] = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        _snake_case : List[Any] = MarkupText(
            f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(lowercase_ ) , Write(lowercase_ ) )
        self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
        # Animate shard weights growing in, then moving onto the CPU cells.
        _snake_case : int = []
        _snake_case : str = []
        for i, rect in enumerate(lowercase_ ):
            _snake_case : Dict = fill.copy().set_fill(lowercase_ , opacity=0.7 )
            target.move_to(lowercase_ )
            first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
            _snake_case : Dict = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
        self.play(*lowercase_ )
        self.play(*lowercase_ )
        self.wait()
from collections import defaultdict
def UpperCamelCase_(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams (case- and space-insensitive)."""
    # BUG FIX: the previous revision declared two identical parameter names
    # (a SyntaxError) and seeded the counter with ``defaultdict(<param>)``;
    # the body read first_str/second_str, so those names are restored and the
    # counter defaults to int (i.e. 0).
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(' ', '')
    second_str = second_str.replace(' ', '')
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)
    # For each character, increment its count for the first string and
    # decrement for the second; anagrams cancel out to all zeros.
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # NOTE(review): after an automated rename all three module variables below
    # share one name (each assignment shadows the previous) and the final
    # call/f-string reference ``check_anagrams``, ``input_a``, ``input_b`` and
    # ``status``, none of which exist under those names in this file (the
    # checker above is ``UpperCamelCase_``).  The unimported ``List``/``Any``
    # annotations were dropped; everything else is byte-identical.
    SCREAMING_SNAKE_CASE = input("Enter the first string ").strip()
    SCREAMING_SNAKE_CASE = input("Enter the second string ").strip()
    SCREAMING_SNAKE_CASE = check_anagrams(input_a, input_b)
    print(F"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 89 | import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
# Module-level logger (mangled name; the unimported ``Any`` annotation,
# which would raise NameError at import time, was dropped).
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)


class lowercase_ ( __snake_case ):
    """Enumeration of the learning-rate schedule names accepted by the
    factory at the bottom of this module.

    Inherits from ``__snake_case`` — presumably an ExplicitEnum-style base
    (renamed at file level); TODO confirm.

    NOTE(review): an automated rename collapsed the member names (originally,
    presumably, LINEAR, COSINE, ...) into ``_lowerCamelCase``, so each
    assignment below shadows the previous one; restore distinct names before
    using this enum.
    """

    _lowerCamelCase = 'linear'
    _lowerCamelCase = 'cosine'
    _lowerCamelCase = 'cosine_with_restarts'
    _lowerCamelCase = 'polynomial'
    _lowerCamelCase = 'constant'
    _lowerCamelCase = 'constant_with_warmup'
    _lowerCamelCase = 'piecewise_constant'
def snake_case(optimizer, last_epoch=-1):
    """Schedule that keeps the learning rate constant at the optimizer's value.

    Args:
        optimizer: the wrapped ``torch.optim.Optimizer``.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        A ``LambdaLR`` whose multiplier is always 1.
    """
    # BUG FIX: the previous revision declared two identical parameter names
    # (a SyntaxError); restored per the diffusers signature.
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def snake_case(optimizer, num_warmup_steps, last_epoch=-1):
    """Constant learning rate after a linear warmup over ``num_warmup_steps``.

    Args:
        optimizer: the wrapped ``torch.optim.Optimizer``.
        num_warmup_steps: number of steps over which the multiplier rises
            linearly from 0 to 1.
        last_epoch: index of the last epoch when resuming training.
    """
    # BUG FIX: duplicate parameter names (SyntaxError) restored from the
    # body's usage of current_step/num_warmup_steps.
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def snake_case(optimizer, step_rules, last_epoch=-1):
    """Piecewise-constant schedule driven by a rule string.

    ``step_rules`` looks like ``"5:0.1,10:0.5,0.01"``: the multiplier is 0.1
    before step 5, 0.5 before step 10, and 0.01 from then on.

    Args:
        optimizer: the wrapped ``torch.optim.Optimizer``.
        step_rules: comma-separated ``step:multiplier`` pairs plus a final
            multiplier for all remaining steps.
        last_epoch: index of the last epoch when resuming training.
    """
    # BUG FIX: duplicate parameter names (SyntaxError) and ``step:value``
    # unpacking collapsed onto a single throw-away local; reconstructed from
    # the surviving references (rules_dict, rule_list, last_lr_multiple, ...).
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(":")
        steps = int(value_str)
        value = float(value)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            # Past the last threshold: use the trailing multiplier.
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def snake_case(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup to the optimizer's LR, then linear decay to 0.

    Args:
        optimizer: the wrapped ``torch.optim.Optimizer``.
        num_warmup_steps: steps over which the multiplier rises 0 -> 1.
        num_training_steps: total steps; the multiplier hits 0 here.
        last_epoch: index of the last epoch when resuming training.
    """
    # BUG FIX: duplicate parameter names (SyntaxError) restored from the
    # body's usage of current_step/num_warmup_steps/num_training_steps.
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def snake_case(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    """Linear warmup followed by cosine decay to 0.

    Args:
        optimizer: the wrapped ``torch.optim.Optimizer``.
        num_warmup_steps: steps over which the multiplier rises 0 -> 1.
        num_training_steps: total steps of the schedule.
        num_cycles: number of cosine half-waves (0.5 = one decay to 0).
        last_epoch: index of the last epoch when resuming training.
    """
    # BUG FIX: duplicate parameter names (SyntaxError) restored from the
    # body's usage; num_cycles default 0.5 kept from the original signature.
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def snake_case(optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1):
    """Linear warmup, then ``num_cycles`` hard cosine restarts decaying to 0.

    Args:
        optimizer: the wrapped ``torch.optim.Optimizer``.
        num_warmup_steps: steps over which the multiplier rises 0 -> 1.
        num_training_steps: total steps of the schedule.
        num_cycles: number of hard restarts (integer).
        last_epoch: index of the last epoch when resuming training.
    """
    # BUG FIX: duplicate parameter names (SyntaxError) restored from the
    # body's usage.
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def snake_case(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """Linear warmup, then polynomial decay from the optimizer's LR to lr_end.

    Args:
        optimizer: the wrapped ``torch.optim.Optimizer``.
        num_warmup_steps: steps over which the multiplier rises 0 -> 1.
        num_training_steps: total steps; past this, the LR stays at lr_end.
        lr_end: final learning rate (absolute, not a multiplier).
        power: polynomial power of the decay.
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: if ``lr_end`` is not smaller than the initial LR.
    """
    # BUG FIX: duplicate parameter names (SyntaxError) restored from the
    # body's usage of lr_init/lr_end/power/current_step.
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Dispatch table: SchedulerType member -> factory function.
# NOTE(review): after an automated rename every factory above is called
# ``snake_case`` (each definition shadows the previous one) and the enum
# class is ``lowercase_``, so none of the names referenced below resolve in
# this file.  Left byte-identical for manual restoration.
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def snake_case(name, optimizer, step_rules=None, num_warmup_steps=None, num_training_steps=None, num_cycles=1, power=1.0, last_epoch=-1):
    """Unified factory: build any supported schedule from its name.

    Args:
        name: a ``SchedulerType`` member or its string value.
        optimizer: the wrapped ``torch.optim.Optimizer``.
        step_rules: rule string, only for the piecewise-constant schedule.
        num_warmup_steps: warmup steps (required by all warmup schedules).
        num_training_steps: total steps (required by decaying schedules).
        num_cycles: cycles for the cosine-with-restarts schedule.
        power: power for the polynomial schedule.
        last_epoch: index of the last epoch when resuming training.

    NOTE(review): ``SchedulerType`` and ``TYPE_TO_SCHEDULER_FUNCTION`` do not
    exist under these names in this file after the automated rename of the
    enum/dispatch table above; restore those names for this to run.
    """
    # BUG FIX: all eight parameters previously shared one name (SyntaxError);
    # restored from the body's keyword usage.
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch)
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch)
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
# Constants and pre-compiled regexes for parsing transformers' __init__ files.
# NOTE(review): an automated rename bound ALL of these (the source path plus
# every regex) to the single name ``__UpperCAmelCase``, so each assignment
# shadows the previous one and the parser below cannot resolve any of them;
# left byte-identical for manual restoration.  The two ``re.compile`` calls
# that use non-raw triple-quoted strings also rely on deprecated string
# escapes (``\s``) and should become raw strings.
__UpperCAmelCase = '''src/transformers'''
# Matches is_xxx_available()
__UpperCAmelCase = re.compile(R'''is\_([a-z_]*)_available()''')
# Catches a one-line _import_struct = {xxx}
__UpperCAmelCase = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__UpperCAmelCase = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''')
# Catches a line if not is_foo_available
__UpperCAmelCase = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''')
# Catches a line _import_struct["bla"].append("foo")
__UpperCAmelCase = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__UpperCAmelCase = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''')
# Catches a line with an object between quotes and a comma: "MyModel",
__UpperCAmelCase = re.compile('''^\s+"([^"]+)",''')
# Catches a line with objects between brackets only: ["foo", "bar"],
__UpperCAmelCase = re.compile('''^\s+\[([^\]]+)\]''')
# Catches a line with from foo import bar, bla, boo
__UpperCAmelCase = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''')
# Catches a line with try:
__UpperCAmelCase = re.compile(R'''^\s*try:''')
# Catches a line with else:
__UpperCAmelCase = re.compile(R'''^\s*else:''')
def _snake_case(A):
    """Return the normalised backend key for an ``if not is_xxx_available()``
    line, or None when the line declares no backend.

    Multiple backends on one line are sorted and joined with "_and_" so the
    key is order-independent.
    """
    if _re_test_backend.search(A) is None:
        return None
    matched = sorted(match[0] for match in _re_backend.findall(A))
    return "_and_".join(matched)
def _snake_case(A) -> Optional[int]:
    """Parse one transformers ``__init__.py``: collect the objects declared in
    ``_import_structure`` and those under ``TYPE_CHECKING``, each grouped by
    backend, and return the two dicts for comparison.

    NOTE(review): an automated rename collapsed every local into
    ``lowerCAmelCase__`` while the code below still reads the original names
    (``lines``, ``line_index``, ``objects``, ``line``, ``backend``,
    ``imports``, ``single_line_import_search``, ...), so this function cannot
    run as written.  The parsing is highly order-dependent, so the code is
    left byte-identical for manual restoration.
    """
    with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        lowerCAmelCase__ = f.readlines()
    lowerCAmelCase__ = 0
    while line_index < len(A ) and not lines[line_index].startswith('''_import_structure = {''' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(A ):
        return None
    # First grab the objects without a specific backend in _import_structure
    lowerCAmelCase__ = []
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
        lowerCAmelCase__ = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(A ):
            lowerCAmelCase__ = _re_one_line_import_struct.search(A ).groups()[0]
            lowerCAmelCase__ = re.findall('''\[([^\]]+)\]''' , A )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
            line_index += 1
            continue
        lowerCAmelCase__ = _re_import_struct_key_value.search(A )
        if single_line_import_search is not None:
            lowerCAmelCase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(A ) > 0]
            objects.extend(A )
        elif line.startswith(''' ''' * 8 + '''"''' ):
            objects.append(line[9:-3] )
        line_index += 1
    lowerCAmelCase__ = {'''none''': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        lowerCAmelCase__ = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            lowerCAmelCase__ = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            lowerCAmelCase__ = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
                lowerCAmelCase__ = lines[line_index]
                if _re_import_struct_add_one.search(A ) is not None:
                    objects.append(_re_import_struct_add_one.search(A ).groups()[0] )
                elif _re_import_struct_add_many.search(A ) is not None:
                    lowerCAmelCase__ = _re_import_struct_add_many.search(A ).groups()[0].split(''', ''' )
                    lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(A ) > 0]
                    objects.extend(A )
                elif _re_between_brackets.search(A ) is not None:
                    lowerCAmelCase__ = _re_between_brackets.search(A ).groups()[0].split(''', ''' )
                    lowerCAmelCase__ = [obj[1:-1] for obj in imports if len(A ) > 0]
                    objects.extend(A )
                elif _re_quote_object.search(A ) is not None:
                    objects.append(_re_quote_object.search(A ).groups()[0] )
                elif line.startswith(''' ''' * 8 + '''"''' ):
                    objects.append(line[9:-3] )
                elif line.startswith(''' ''' * 12 + '''"''' ):
                    objects.append(line[13:-3] )
                line_index += 1
            lowerCAmelCase__ = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    lowerCAmelCase__ = []
    while (
        line_index < len(A )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('''else''' )
    ):
        lowerCAmelCase__ = lines[line_index]
        lowerCAmelCase__ = _re_import.search(A )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
        elif line.startswith(''' ''' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    lowerCAmelCase__ = {'''none''': objects}
    # Let's continue with backend-specific objects
    while line_index < len(A ):
        # If the line is an if is_backend_available, we grab all objects associated.
        lowerCAmelCase__ = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            lowerCAmelCase__ = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            lowerCAmelCase__ = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
                lowerCAmelCase__ = lines[line_index]
                lowerCAmelCase__ = _re_import.search(A )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
                elif line.startswith(''' ''' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            lowerCAmelCase__ = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def _snake_case ( A , A ) -> str:
def find_duplicates(A ):
return [k for k, v in collections.Counter(A ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowerCAmelCase__ = []
for key in import_dict_objects.keys():
lowerCAmelCase__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
lowerCAmelCase__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowerCAmelCase__ = '''base imports''' if key == '''none''' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def _snake_case() -> int:
    """Check every transformers ``__init__.py`` for import/TYPE_CHECKING drift.

    NOTE(review): an automated rename left ``os.walk(A)`` walking an
    undefined name (originally, presumably, the source-root constant defined
    at the top of this module), collapsed every local into
    ``lowerCAmelCase__`` while references still read ``failures``, ``fname``,
    ``objects``, ``errors``, and calls ``parse_init``/``analyze_results``,
    which exist in this file only under the name ``_snake_case``.  Left
    byte-identical for manual restoration.
    """
    lowerCAmelCase__ = []
    for root, _, files in os.walk(A ):
        if "__init__.py" in files:
            lowerCAmelCase__ = os.path.join(A , '''__init__.py''' )
            lowerCAmelCase__ = parse_init(A )
            if objects is not None:
                lowerCAmelCase__ = analyze_results(*A )
                if len(A ) > 0:
                    lowerCAmelCase__ = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
                    failures.append('''\n'''.join(A ) )
    if len(A ) > 0:
        raise ValueError('''\n\n'''.join(A ) )
def _snake_case() -> str:
    """Walk the transformers source tree and list every submodule path.

    NOTE(review): same mangling as the checker above — ``os.walk(A)`` walks
    an undefined name, locals are collapsed into ``lowerCAmelCase__`` while
    references read ``submodules``, ``short_path`` and ``submodule``.  Left
    byte-identical for manual restoration.
    """
    lowerCAmelCase__ = []
    for path, directories, files in os.walk(A ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('''_''' ):
                directories.remove(A )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(A ) / folder).glob('''*.py''' ) ) ) == 0:
                continue
            lowerCAmelCase__ = str((Path(A ) / folder).relative_to(A ) )
            lowerCAmelCase__ = short_path.replace(os.path.sep , '''.''' )
            submodules.append(A )
        for fname in files:
            if fname == "__init__.py":
                continue
            lowerCAmelCase__ = str((Path(A ) / fname).relative_to(A ) )
            lowerCAmelCase__ = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
            if len(submodule.split('''.''' ) ) == 1:
                submodules.append(A )
    return submodules


# Submodules deliberately absent from the main init's _import_structure.
__UpperCAmelCase = [
    '''convert_pytorch_checkpoint_to_tf2''',
    '''modeling_flax_pytorch_utils''',
]
def _snake_case() -> str:
    """Verify every submodule is registered in transformers' main init.

    NOTE(review): mangled like its siblings — ``os.path.join(A, ...)`` uses
    an undefined name, locals are collapsed into ``lowerCAmelCase__`` while
    references read ``spec``, ``transformers``, ``module_not_registered`` and
    ``list_of_modules``, and ``IGNORE_SUBMODULES``/``PATH_TO_TRANSFORMERS``
    do not exist under those names here.  The ``__main__`` guard likewise
    calls ``check_all_inits``/``check_submodules``, which were renamed.
    Left byte-identical for manual restoration.
    """
    # This is to make sure the transformers module imported is the one in the repo.
    lowerCAmelCase__ = importlib.util.spec_from_file_location(
        '''transformers''' , os.path.join(A , '''__init__.py''' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    lowerCAmelCase__ = spec.loader.load_module()
    lowerCAmelCase__ = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(A ) > 0:
        lowerCAmelCase__ = '''\n'''.join(F"""- {module}""" for module in module_not_registered )
        raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
            F"""{list_of_modules}\n"""
            '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class lowercase_(__snake_case):
    """Configuration class for RoCBert models (model_type "roc_bert").

    Extends ``__snake_case`` — presumably transformers' ``PretrainedConfig``
    (renamed at file level); TODO confirm.

    BUG FIX: the previous revision declared all 23 ``__init__`` parameters
    with the single name ``lowercase_`` (a SyntaxError) while the body read
    the real names; the parameter names below are reconstructed from the
    attribute assignments, matching the original defaults in order.
    """

    _lowerCamelCase = 'roc_bert'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # Extra RoCBert-specific channels: pronunciation and glyph-shape
        # embeddings that can be concatenated with the token embeddings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
"""simple docstring"""
import socket
def _snake_case ( ):
    """Connect to a file server on this host and stream its payload to disk.

    Fixes from the obfuscated original: the locals ``host``/``port``/``sock``
    were all bound to one name, ``out_file.write`` received an undefined name
    instead of the received chunk, and the ``__main__`` guard called an
    undefined ``main()``. Side effects: writes ``Received_file`` in the CWD.
    """
    host = socket.gethostname()
    port = 1_2312
    # Context manager guarantees the socket is closed even on error.
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as sock:
        sock.connect((host, port) )
        sock.send(b'Hello server!' )
        with open('Received_file' , 'wb' ) as out_file:
            print('File opened' )
            print('Receiving data...' )
            while True:
                data = sock.recv(1024 )
                if not data:
                    # Empty recv() means the server closed the connection.
                    break
                out_file.write(data )
    print('Successfully received the file' )
    print('Connection closed' )


if __name__ == "__main__":
    _snake_case()
def snake_case (img ):
    """Replace every pixel of *img* by its colour negative, in place.

    Args:
        img: an H x W x 3 numpy-style image array (uint8 channels).
    Returns:
        The same array object, with each channel value v replaced by 255 - v.
    """
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height ):
        for j in range(width ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img


if __name__ == "__main__":
    # read original image
    __SCREAMING_SNAKE_CASE = imread('image_data/lena.jpg', 1)
    # convert to its negative (the original guard called an undefined name)
    __SCREAMING_SNAKE_CASE = snake_case(__SCREAMING_SNAKE_CASE)
    # show result image
    imshow('negative of original image', __SCREAMING_SNAKE_CASE)
    waitKey(0)
    destroyAllWindows()
'''simple docstring'''
import math
UpperCamelCase_ = 10
UpperCamelCase_ = 7
UpperCamelCase_ = BALLS_PER_COLOUR * NUM_COLOURS
def _lowerCAmelCase ( __magic_name__ : int = 20 ) -> str:
lowercase : Any =math.comb(__magic_name__ , __magic_name__ )
lowercase : Any =math.comb(NUM_BALLS - BALLS_PER_COLOUR , __magic_name__ )
lowercase : Optional[Any] =NUM_COLOURS * (1 - missing_colour / total)
return f'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 92 | import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__SCREAMING_SNAKE_CASE : List[str] = Mapping[str, np.ndarray]
__SCREAMING_SNAKE_CASE : List[Any] = Mapping[str, Any] # Is a nested dict.
__SCREAMING_SNAKE_CASE : List[Any] = 0.01
@dataclasses.dataclass(frozen=True )
class Protein:
    """Protein structure representation.

    Field names are restored from the keyword constructions elsewhere in this
    file (``Protein(atom_positions=..., atom_mask=..., aatype=..., ...)``);
    the obfuscated original gave every field the same name. Instances are
    frozen (immutable) value objects.
    """

    # Cartesian coordinates of atoms in angstroms.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def snake_case (__lowercase ) -> Protein:
    """Parse a ProteinNet-format string into a `Protein`.

    NOTE(review): every local here was obfuscated to ``_snake_case`` and both
    arguments of several calls to ``__lowercase``, so the names referenced
    later (`tags`, `groups`, `seq`, `tertiary`, `tertiary_np`,
    `atom_positions`, `mask`, `atom_mask`, `aatype`) no longer resolve; the
    intended bindings must be restored before this function can run.
    """
    # Section headers look like "[PRIMARY]\n"; the file is split on them.
    _snake_case : str = r"(\[[A-Z]+\]\n)"
    _snake_case : List[str] = [tag.strip() for tag in re.split(__lowercase , __lowercase ) if len(__lowercase ) > 0]
    # Pair each "[TAG]" with the lines of the section that follows it.
    _snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
    _snake_case : List[str] = ["N", "CA", "C"]
    _snake_case : Any = None
    _snake_case : Union[str, Any] = None
    _snake_case : Optional[int] = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            _snake_case : Tuple = g[1][0].strip()
            for i in range(len(__lowercase ) ):
                if seq[i] not in residue_constants.restypes:
                    # Unknown residue symbols are mapped to 'X'.
                    _snake_case : Tuple = "X" # FIXME: strings are immutable
            _snake_case : int = np.array(
                [residue_constants.restype_order.get(__lowercase , residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            _snake_case : List[List[float]] = []
            # One list of coordinates per axis (x, y, z).
            for axis in range(3 ):
                tertiary.append(list(map(__lowercase , g[1][axis].split() ) ) )
            _snake_case : Dict = np.array(__lowercase )
            _snake_case : Dict = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
            for i, atom in enumerate(__lowercase ):
                _snake_case : List[Any] = np.transpose(tertiary_np[:, i::3] )
            # ProteinNet stores picometres; convert to angstroms.
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            _snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
            _snake_case : Any = np.zeros(
                (
                    len(__lowercase ),
                    residue_constants.atom_type_num,
                ) ).astype(np.floataa )
            for i, atom in enumerate(__lowercase ):
                _snake_case : Dict = 1
            # Zero out atoms of residues masked out in the [MASK] section.
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=__lowercase , atom_mask=__lowercase , aatype=__lowercase , residue_index=np.arange(len(__lowercase ) ) , b_factors=__lowercase , )
def snake_case (__lowercase , __lowercase = 0 ) -> List[str]:
'''simple docstring'''
_snake_case : List[str] = []
_snake_case : Optional[Any] = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
_snake_case : str = prot.parents
_snake_case : str = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
_snake_case : int = [p for i, p in zip(__lowercase , __lowercase ) if i == chain_id]
if parents is None or len(__lowercase ) == 0:
_snake_case : Optional[int] = ["N/A"]
pdb_headers.append(F"""PARENT {' '.join(__lowercase )}""" )
return pdb_headers
def snake_case (__lowercase , __lowercase ) -> str:
    """Rewrite a PDB string so the proper REMARK/PARENT headers appear, with a
    fresh PARENT line emitted at the start of every chain.

    NOTE(review): locals were obfuscated to ``_snake_case``; the names read
    later (`out_pdb_lines`, `lines`, `remark`, `parent_dict`, `max_idx`,
    `parents_per_chain`, `chain_counter`) no longer resolve as written and
    must be restored before this can run.
    """
    _snake_case : List[str] = []
    _snake_case : Optional[int] = pdb_str.split("\n" )
    _snake_case : List[str] = prot.remark
    if remark is not None:
        out_pdb_lines.append(F"""REMARK {remark}""" )
    _snake_case : List[List[str]]
    if prot.parents is not None and len(prot.parents ) > 0:
        _snake_case : str = []
        if prot.parents_chain_index is not None:
            # Group the parents by the chain index they belong to.
            _snake_case : Dict[str, List[str]] = {}
            for p, i in zip(prot.parents , prot.parents_chain_index ):
                parent_dict.setdefault(str(__lowercase ) , [] )
                parent_dict[str(__lowercase )].append(__lowercase )
            _snake_case : Any = max([int(__lowercase ) for chain_idx in parent_dict] )
            for i in range(max_idx + 1 ):
                _snake_case : Tuple = parent_dict.get(str(__lowercase ) , ["N/A"] )
                parents_per_chain.append(__lowercase )
        else:
            parents_per_chain.append(list(prot.parents ) )
    else:
        _snake_case : List[str] = [["N/A"]]

    def make_parent_line(__lowercase ) -> str:
        return F"""PARENT {' '.join(__lowercase )}"""

    out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
    _snake_case : int = 0
    for i, l in enumerate(__lowercase ):
        # Copy through everything except old PARENT/REMARK headers.
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(__lowercase )
        if "TER" in l and "END" not in lines[i + 1]:
            # A chain just ended: emit the PARENT line of the next chain.
            chain_counter += 1
            if not chain_counter >= len(__lowercase ):
                _snake_case : Tuple = parents_per_chain[chain_counter]
            else:
                _snake_case : str = ["N/A"]
            out_pdb_lines.append(make_parent_line(__lowercase ) )
    return "\n".join(__lowercase )
def snake_case (__lowercase ) -> str:
    """Render a `Protein` as PDB-format text (ATOM/TER/END records).

    NOTE(review): locals were obfuscated to ``_snake_case``; the names read
    later (`restypes`, `pdb_lines`, `aatype`, `atom_positions`, `atom_mask`,
    `residue_index`, `b_factors`, `chain_index`, `atom_types`, `n`,
    `atom_index`, `chain_tags`, `prev_chain_index`, per-record fields) no
    longer resolve as written and must be restored before this can run.
    """
    _snake_case : Optional[Any] = residue_constants.restypes + ["X"]

    def res_atoa(__lowercase ) -> str:
        # Map a restype index to its 3-letter code, falling back to "UNK".
        return residue_constants.restype_atoa.get(restypes[r] , "UNK" )

    _snake_case : Optional[int] = residue_constants.atom_types
    _snake_case : List[str] = []
    _snake_case : Tuple = prot.atom_mask
    _snake_case : List[str] = prot.aatype
    _snake_case : int = prot.atom_positions
    _snake_case : int = prot.residue_index.astype(np.intaa )
    _snake_case : List[Any] = prot.b_factors
    _snake_case : str = prot.chain_index
    if np.any(aatype > residue_constants.restype_num ):
        raise ValueError("Invalid aatypes." )
    _snake_case : Union[str, Any] = get_pdb_headers(__lowercase )
    if len(__lowercase ) > 0:
        pdb_lines.extend(__lowercase )
    _snake_case : Optional[Any] = aatype.shape[0]
    # PDB atom serial numbers start at 1.
    _snake_case : str = 1
    _snake_case : Tuple = 0
    _snake_case : int = string.ascii_uppercase
    _snake_case : Optional[Any] = None
    # Add all atom sites.
    for i in range(__lowercase ):
        _snake_case : Dict = res_atoa(aatype[i] )
        for atom_name, pos, mask, b_factor in zip(__lowercase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
            if mask < 0.5:
                continue
            _snake_case : List[Any] = "ATOM"
            _snake_case : Union[str, Any] = atom_name if len(__lowercase ) == 4 else F""" {atom_name}"""
            _snake_case : str = ""
            _snake_case : str = ""
            _snake_case : Any = 1.00
            _snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
            _snake_case : Dict = ""
            _snake_case : Any = "A"
            if chain_index is not None:
                _snake_case : List[Any] = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            _snake_case : Optional[int] = (
                F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
                F"""{res_name_a:>3} {chain_tag:>1}"""
                F"""{residue_index[i]:>4}{insertion_code:>1} """
                F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
                F"""{occupancy:>6.2f}{b_factor:>6.2f} """
                F"""{element:>2}{charge:>2}"""
            )
            pdb_lines.append(__lowercase )
            atom_index += 1
        _snake_case : Dict = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                _snake_case : Optional[int] = True
                _snake_case : Union[str, Any] = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            _snake_case : List[str] = "TER"
            _snake_case : str = (
                F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
            )
            pdb_lines.append(__lowercase )
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(__lowercase , __lowercase ) )
    pdb_lines.append("END" )
    pdb_lines.append("" )
    return "\n".join(__lowercase )
def snake_case (prot ) -> np.ndarray:
    """Return the ideal per-residue atom mask for *prot*.

    Looks each residue's aatype up in the standard atom-mask table. The
    obfuscated original took ``__lowercase`` but read an undefined ``prot``;
    the parameter name is restored so the body resolves.
    """
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def snake_case (features , result , b_factors = None , chain_index = None , remark = None , parents = None , parents_chain_index = None , ) -> Protein:
    """Assemble a `Protein` from model input features and prediction output.

    The obfuscated original declared seven parameters all named
    ``__lowercase`` (a syntax error); names are restored from the keyword
    arguments used in the body. ``residue_index`` is shifted to 1-based as
    PDB expects; missing b-factors default to zeros shaped like the mask.
    """
    return Protein(
        aatype=features["aatype"] ,
        atom_positions=result["final_atom_positions"] ,
        atom_mask=result["final_atom_mask"] ,
        residue_index=features["residue_index"] + 1 ,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"] ) ,
        chain_index=chain_index ,
        remark=remark ,
        parents=parents ,
        parents_chain_index=parents_chain_index , )
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
__A = logging.get_logger(__name__)
@add_end_docstrings(a )
class _lowerCAmelCase ( a ):
    """Depth-estimation pipeline: image in, depth map (PIL image + raw tensor) out.

    NOTE(review): the decorator argument and base class were obfuscated to
    ``a`` (undefined here), and the four pipeline hooks below all share the
    name ``snake_case`` so only the last definition survives on the class —
    presumably they were `_sanitize_parameters` / `preprocess` / `_forward` /
    `postprocess`; confirm against the original pipeline implementation.
    """

    def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
        '''Forward construction to the base pipeline, require the vision backend.'''
        # NOTE(review): *args and **kwargs share one (mangled) name — invalid
        # as written; check_model_type is passed the kwargs dict, presumably
        # this was MODEL_FOR_DEPTH_ESTIMATION_MAPPING originally.
        super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
        requires_backends(self , 'vision' )
        self.check_model_type(__UpperCAmelCase )

    def __call__( self , __UpperCAmelCase , **__UpperCAmelCase ):
        '''Run the pipeline on one image (or a batch); delegates to the base class.'''
        return super().__call__(__UpperCAmelCase , **__UpperCAmelCase )

    def snake_case ( self , **__UpperCAmelCase ):
        '''No tunable parameters: empty kwargs for preprocess/forward/postprocess.'''
        return {}, {}, {}

    def snake_case ( self , __UpperCAmelCase ):
        '''Load the input image and run the image processor to build model inputs.'''
        lowerCAmelCase__ :str = load_image(__UpperCAmelCase )
        # NOTE(review): this presumably stored image.size on self for use by
        # postprocess (which reads self.image_size); here it only binds a local.
        lowerCAmelCase__ :List[str] = image.size
        lowerCAmelCase__ :Optional[Any] = self.image_processor(images=__UpperCAmelCase , return_tensors=self.framework )
        return model_inputs

    def snake_case ( self , __UpperCAmelCase ):
        '''Forward pass: run the model on the processed inputs.'''
        lowerCAmelCase__ :Dict = self.model(**__UpperCAmelCase )
        return model_outputs

    def snake_case ( self , __UpperCAmelCase ):
        '''Upsample the predicted depth to the input size and build the output dict.'''
        lowerCAmelCase__ :Dict = model_outputs.predicted_depth
        # Bicubic upsampling back to (width, height) reversed into (h, w).
        lowerCAmelCase__ :Dict = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='bicubic' , align_corners=__UpperCAmelCase )
        lowerCAmelCase__ :Optional[Any] = prediction.squeeze().cpu().numpy()
        # Rescale to 0-255 for an 8-bit visualisation image.
        lowerCAmelCase__ :int = (output * 2_5_5 / np.max(__UpperCAmelCase )).astype('uint8' )
        lowerCAmelCase__ :List[Any] = Image.fromarray(__UpperCAmelCase )
        lowerCAmelCase__ :Tuple = {}
        lowerCAmelCase__ :Tuple = predicted_depth
        lowerCAmelCase__ :Dict = depth
        return output_dict
| 93 | from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class lowercase_ ( __snake_case ):
    """Processor wrapping a SAM image processor: normalizes input points,
    labels and boxes alongside image preprocessing.

    NOTE(review): heavily obfuscated — the two class attributes below collide
    on one name (presumably ``attributes`` and ``image_processor_class``),
    locals are all ``_snake_case``, and several annotated multi-target
    assignments (e.g. ``_snake_case ,_snake_case : int = ...``) are invalid
    syntax as written.
    """

    _lowerCamelCase = ['image_processor']
    _lowerCamelCase = 'SamImageProcessor'

    def __init__( self , lowercase_ ):
        # point_pad_value (-10) and the longest-edge target size are read by
        # the helpers below (self.point_pad_value / self.target_size) — the
        # obfuscated assignments here presumably set those attributes.
        super().__init__(lowercase_ )
        _snake_case : Optional[Any] = self.image_processor
        _snake_case : Tuple = -10
        _snake_case : str = self.image_processor.size["longest_edge"]

    def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = None , **lowercase_ , ):
        '''Preprocess images, then validate and normalize points/labels/boxes.'''
        _snake_case : List[Any] = self.image_processor(
            lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
        # pop arguments that are not used in the forward but used nevertheless
        _snake_case : Any = encoding_image_processor["original_sizes"]
        if hasattr(lowercase_ , "numpy" ): # Checks if Torch or TF tensor
            _snake_case : int = original_sizes.numpy()
        _snake_case ,_snake_case ,_snake_case : Union[str, Any] = self._check_and_preprocess_points(
            input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , )
        _snake_case : Dict = self._normalize_and_convert(
            lowercase_ , lowercase_ , input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , return_tensors=lowercase_ , )
        return encoding_image_processor

    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="pt" , ):
        '''Rescale points/labels/boxes to the processed image size and convert to tensors.'''
        if input_points is not None:
            if len(lowercase_ ) != len(lowercase_ ):
                # One shared original size for all point groups.
                _snake_case : int = [
                    self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] ) for point in input_points
                ]
            else:
                _snake_case : Dict = [
                    self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ )
                    for point, original_size in zip(lowercase_ , lowercase_ )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    _snake_case ,_snake_case : int = self._pad_points_and_labels(lowercase_ , lowercase_ )
            _snake_case : Any = np.array(lowercase_ )
        if input_labels is not None:
            _snake_case : Optional[Any] = np.array(lowercase_ )
        if input_boxes is not None:
            if len(lowercase_ ) != len(lowercase_ ):
                _snake_case : Optional[Any] = [
                    self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] , is_bounding_box=lowercase_ )
                    for box in input_boxes
                ]
            else:
                _snake_case : List[str] = [
                    self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ , is_bounding_box=lowercase_ )
                    for box, original_size in zip(lowercase_ , lowercase_ )
                ]
            _snake_case : Tuple = np.array(lowercase_ )
        if input_boxes is not None:
            if return_tensors == "pt":
                _snake_case : List[str] = torch.from_numpy(lowercase_ )
                # boxes batch size of 1 by default
                _snake_case : Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                _snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
                # boxes batch size of 1 by default
                _snake_case : Optional[int] = tf.expand_dims(lowercase_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                _snake_case : Tuple = torch.from_numpy(lowercase_ )
                # point batch size of 1 by default
                _snake_case : int = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                _snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
                # point batch size of 1 by default
                _snake_case : Tuple = tf.expand_dims(lowercase_ , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                _snake_case : Dict = torch.from_numpy(lowercase_ )
                # point batch size of 1 by default
                _snake_case : str = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                _snake_case : Optional[Any] = tf.convert_to_tensor(lowercase_ )
                # point batch size of 1 by default
                _snake_case : List[Any] = tf.expand_dims(lowercase_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels} )
        return encoding_image_processor

    def UpperCamelCase ( self , lowercase_ , lowercase_ ):
        '''Pad every point group (and its labels) up to the largest group size.'''
        _snake_case : List[Any] = max([point.shape[0] for point in input_points] )
        _snake_case : List[str] = []
        for i, point in enumerate(lowercase_ ):
            if point.shape[0] != expected_nb_points:
                # Pad with the sentinel point/label value (self.point_pad_value).
                _snake_case : Optional[Any] = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                _snake_case : Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(lowercase_ )
        _snake_case : Optional[Any] = processed_input_points
        return input_points, input_labels

    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False ):
        '''Scale coordinates from the original image size to the resized size.'''
        _snake_case ,_snake_case : Optional[int] = original_size
        _snake_case ,_snake_case : List[str] = self.image_processor._get_preprocess_shape(lowercase_ , longest_edge=lowercase_ )
        _snake_case : Optional[Any] = deepcopy(lowercase_ ).astype(lowercase_ )
        if is_bounding_box:
            # Treat each (x0, y0, x1, y1) box as two points for scaling.
            _snake_case : str = coords.reshape(-1 , 2 , 2 )
        _snake_case : Optional[Any] = coords[..., 0] * (new_w / old_w)
        _snake_case : Dict = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            _snake_case : Optional[Any] = coords.reshape(-1 , 4 )
        return coords

    def UpperCamelCase ( self , lowercase_=None , lowercase_=None , lowercase_=None , ):
        '''Validate the raw points/labels/boxes and coerce them to numpy arrays.'''
        if input_points is not None:
            if hasattr(lowercase_ , "numpy" ): # Checks for TF or Torch tensor
                _snake_case : Union[str, Any] = input_points.numpy().tolist()
            if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_points[0] , lowercase_ ):
                raise ValueError("Input points must be a list of list of floating points." )
            _snake_case : Any = [np.array(lowercase_ ) for input_point in input_points]
        else:
            _snake_case : Optional[int] = None
        if input_labels is not None:
            if hasattr(lowercase_ , "numpy" ):
                _snake_case : Tuple = input_labels.numpy().tolist()
            if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_labels[0] , lowercase_ ):
                raise ValueError("Input labels must be a list of list integers." )
            _snake_case : Tuple = [np.array(lowercase_ ) for label in input_labels]
        else:
            _snake_case : Optional[Any] = None
        if input_boxes is not None:
            if hasattr(lowercase_ , "numpy" ):
                _snake_case : List[str] = input_boxes.numpy().tolist()
            if (
                not isinstance(lowercase_ , lowercase_ )
                or not isinstance(input_boxes[0] , lowercase_ )
                or not isinstance(input_boxes[0][0] , lowercase_ )
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points." )
            _snake_case : List[Any] = [np.array(lowercase_ ).astype(np.floataa ) for box in input_boxes]
        else:
            _snake_case : Optional[int] = None
        return input_points, input_labels, input_boxes

    @property
    def UpperCamelCase ( self ):
        '''Names of the model inputs produced, deduplicated preserving order.'''
        _snake_case : Union[str, Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(lowercase_ ) )

    def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
        '''Delegate mask post-processing to the image processor.'''
        # NOTE(review): stray dataset-separator tokens fused onto this line.
        return self.image_processor.post_process_masks(*lowercase_ , **lowercase_ ) | 670 | 0 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
SCREAMING_SNAKE_CASE = True
except (ImportError, ModuleNotFoundError):
SCREAMING_SNAKE_CASE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowercase_ ( __A : str ) -> str:
    """Split *__A* into sentences and rejoin them separated by newlines.

    Bug fix: the original called ``re.sub`` and discarded its return value
    (strings are immutable, so the "<n>" markers were never removed).
    Requires nltk with the punkt tokenizer; raises AssertionError otherwise.
    """
    __A = re.sub('''<n>''' , '''''' , __A )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__A ) )
| 94 | def snake_case (__lowercase ) -> int:
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
_snake_case : Union[str, Any] = grid[0]
for row_n in range(1 , len(__lowercase ) ):
_snake_case : Union[str, Any] = grid[row_n]
_snake_case : List[Any] = fill_row(__lowercase , __lowercase )
_snake_case : List[Any] = grid[row_n]
return grid[-1][-1]
def snake_case (__lowercase , __lowercase ) -> list:
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(__lowercase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class UpperCamelCase_ (PretrainedConfig ):
    """Configuration for REALM models (model type ``realm``).

    The obfuscated original declared 24 parameters all named ``lowerCAmelCase_``
    (a syntax error), bound locals instead of instance attributes, and
    inherited from an undefined ``__A``; names are restored from the attribute
    assignments in the body and the imported ``PretrainedConfig`` base.
    """

    __magic_name__ = '''realm'''

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13_353_718,
        searcher_beam_size=5_000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 95 | import random
def snake_case (__lowercase , __lowercase ) -> tuple:
'''simple docstring'''
_snake_case ,_snake_case ,_snake_case : List[Any] = [], [], []
for element in data:
if element < pivot:
less.append(__lowercase )
elif element > pivot:
greater.append(__lowercase )
else:
equal.append(__lowercase )
return less, equal, greater
def snake_case (__lowercase , __lowercase ) -> List[Any]:
'''simple docstring'''
if index >= len(__lowercase ) or index < 0:
return None
_snake_case : Any = items[random.randint(0 , len(__lowercase ) - 1 )]
_snake_case : Tuple = 0
_snake_case ,_snake_case ,_snake_case : Tuple = _partition(__lowercase , __lowercase )
_snake_case : Tuple = len(__lowercase )
_snake_case : List[str] = len(__lowercase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(__lowercase , __lowercase )
# must be in larger
else:
return quick_select(__lowercase , index - (m + count) ) | 670 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__lowerCamelCase = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , feature_size : int , sampling_rate : int , padding_value : float , **kwargs ):
    """Store the geometry/padding configuration shared by sequence feature extractors.

    The obfuscated original declared all parameters as ``__snake_case`` (a
    syntax error) and bound locals instead of attributes; names are restored
    from the reads elsewhere in this class (``self.feature_size``,
    ``self.padding_value``, ``self.padding_side``,
    ``self.return_attention_mask``).
    """
    self.feature_size = feature_size
    self.sampling_rate = sampling_rate
    self.padding_value = padding_value
    # Optional overrides arrive via kwargs and are removed before the base init.
    self.padding_side = kwargs.pop("""padding_side""" , """right""" )
    # NOTE(review): the original default here was obfuscated; presumably True —
    # confirm against the upstream implementation.
    self.return_attention_mask = kwargs.pop("""return_attention_mask""" , True )
    super().__init__(**kwargs )
def lowerCamelCase__ ( self : Union[str, Any] , __snake_case : Union[
    BatchFeature,
    List[BatchFeature],
    Dict[str, BatchFeature],
    Dict[str, List[BatchFeature]],
    List[Dict[str, BatchFeature]],
] , __snake_case : Union[bool, str, PaddingStrategy] = True , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
    """Pad (and optionally truncate) a batch of processed features.

    NOTE(review): the parameters were all obfuscated to ``__snake_case`` (a
    syntax error) and the locals to ``__magic_name__``, so the names read
    below (`processed_features`, `padding`, `max_length`, `truncation`,
    `pad_to_multiple_of`, `return_attention_mask`, `return_tensors`,
    `required_input`, `first_element`, `index`, `padding_strategy`,
    `batch_size`, `truncated_inputs`, `batch_outputs`, `outputs`) no longer
    resolve as written.
    """
    # If we have a list of dicts, let's convert it in a dict of lists
    # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
    if isinstance(__snake_case , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
        __magic_name__: Union[str, Any] = {
            key: [example[key] for example in processed_features] for key in processed_features[0].keys()
        }
    # The model's main input name, usually `input_values`, has be passed for padding
    if self.model_input_names[0] not in processed_features:
        raise ValueError(
            """You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
            F' to this method that includes {self.model_input_names[0]}, but you provided'
            F' {list(processed_features.keys() )}' )
    __magic_name__: Any = processed_features[self.model_input_names[0]]
    __magic_name__: Tuple = (
        return_attention_mask if return_attention_mask is not None else self.return_attention_mask
    )
    if len(__snake_case ) == 0:
        if return_attention_mask:
            __magic_name__: Optional[Any] = []
        return processed_features
    # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
    # and rebuild them afterwards if no return_tensors is specified
    # Note that we lose the specific device the tensor may be on for PyTorch
    __magic_name__: Tuple = required_input[0]
    if isinstance(__snake_case , (list, tuple) ):
        # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
        __magic_name__: str = 0
        while len(required_input[index] ) == 0:
            index += 1
        if index < len(__snake_case ):
            __magic_name__: Optional[int] = required_input[index][0]
    if return_tensors is None:
        # Infer the tensor framework from the first element.
        if is_tf_tensor(__snake_case ):
            __magic_name__: List[str] = """tf"""
        elif is_torch_tensor(__snake_case ):
            __magic_name__: Any = """pt"""
        elif isinstance(__snake_case , (int, float, list, tuple, np.ndarray) ):
            __magic_name__: int = """np"""
        else:
            raise ValueError(
                F'type of {first_element} unknown: {type(__snake_case )}. '
                """Should be one of a python, numpy, pytorch or tensorflow object.""" )
    for key, value in processed_features.items():
        if isinstance(value[0] , (int, float) ):
            __magic_name__: List[str] = to_numpy(__snake_case )
        else:
            __magic_name__: Any = [to_numpy(__snake_case ) for v in value]
    # Convert padding_strategy in PaddingStrategy
    __magic_name__: str = self._get_padding_strategies(padding=__snake_case , max_length=__snake_case )
    __magic_name__: str = processed_features[self.model_input_names[0]]
    __magic_name__: str = len(__snake_case )
    if not all(len(__snake_case ) == batch_size for v in processed_features.values() ):
        raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
    __magic_name__: List[Any] = []
    for i in range(__snake_case ):
        __magic_name__: List[Any] = {k: v[i] for k, v in processed_features.items()}
        # truncation
        __magic_name__: List[Any] = self._truncate(
            __snake_case , max_length=__snake_case , pad_to_multiple_of=__snake_case , truncation=__snake_case , )
        truncated_inputs.append(__snake_case )
    if padding_strategy == PaddingStrategy.LONGEST:
        # make sure that `max_length` cannot be longer than the longest truncated length
        __magic_name__: Union[str, Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
        __magic_name__: Union[str, Any] = PaddingStrategy.MAX_LENGTH
    __magic_name__: List[str] = {}
    for i in range(__snake_case ):
        # padding
        __magic_name__: str = self._pad(
            truncated_inputs[i] , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
        for key, value in outputs.items():
            if key not in batch_outputs:
                __magic_name__: Optional[Any] = []
            if value.dtype is np.dtype(np.floataa ):
                __magic_name__: Any = value.astype(np.floataa )
            batch_outputs[key].append(__snake_case )
    return BatchFeature(__snake_case , tensor_type=__snake_case )

def lowerCamelCase__ ( self : Tuple , __snake_case : Union[Dict[str, np.ndarray], BatchFeature] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ) -> dict:
    """Pad one example up to max_length on self.padding_side.

    NOTE(review): same obfuscation as above — locals `processed_features`,
    `required_input`, `max_length`, `needs_to_be_padded`, `difference` etc.
    no longer resolve as written.
    """
    __magic_name__: List[str] = processed_features[self.model_input_names[0]]
    if padding_strategy == PaddingStrategy.LONGEST:
        __magic_name__: List[Any] = len(__snake_case )
    if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
        # Round max_length up to the next multiple of pad_to_multiple_of.
        __magic_name__: List[str] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
    __magic_name__: str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__snake_case ) < max_length
    if return_attention_mask and "attention_mask" not in processed_features:
        __magic_name__: int = np.ones(len(__snake_case ) , dtype=np.intaa )
    if needs_to_be_padded:
        __magic_name__: str = max_length - len(__snake_case )
        if self.padding_side == "right":
            if return_attention_mask:
                __magic_name__: List[Any] = np.pad(
                    processed_features["""attention_mask"""] , (0, difference) )
            __magic_name__: Union[str, Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
            __magic_name__: int = np.pad(
                __snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
        elif self.padding_side == "left":
            if return_attention_mask:
                __magic_name__: Dict = np.pad(
                    processed_features["""attention_mask"""] , (difference, 0) )
            __magic_name__: Optional[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
            __magic_name__: int = np.pad(
                __snake_case , __snake_case , """constant""" , constant_values=self.padding_value )
        else:
            raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
    return processed_features

def lowerCamelCase__ ( self : Optional[Any] , __snake_case : Union[Dict[str, np.ndarray], BatchFeature] , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ) -> int:
    """Truncate one example down to max_length when truncation is requested.

    NOTE(review): same obfuscation — locals `truncation`, `max_length`,
    `pad_to_multiple_of`, `needs_to_be_truncated` no longer resolve as written.
    """
    if not truncation:
        return processed_features
    elif truncation and max_length is None:
        raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
    __magic_name__: Dict = processed_features[self.model_input_names[0]]
    # find `max_length` that fits `pad_to_multiple_of`
    if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
        __magic_name__: Optional[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
    __magic_name__: Tuple = len(__snake_case ) > max_length
    if needs_to_be_truncated:
        __magic_name__: Any = processed_features[self.model_input_names[0]][:max_length]
        if "attention_mask" in processed_features:
            __magic_name__: List[Any] = processed_features["""attention_mask"""][:max_length]
    return processed_features
def _get_padding_strategies(self, padding=False, max_length=None):
    """Resolve the user-supplied ``padding`` argument into a PaddingStrategy.

    Args:
        padding: False (no padding), True (pad to longest), a ``PaddingStrategy``
            member, or a string accepted by ``PaddingStrategy(...)``.
        max_length: required when the resolved strategy is ``MAX_LENGTH``.

    Returns:
        The resolved ``PaddingStrategy`` member.

    Raises:
        ValueError: if ``MAX_LENGTH`` is requested without ``max_length``, or
            padding is requested but ``self.padding_value`` is undefined.
    """
    # Get padding strategy
    if padding is not False:
        if padding is True:
            padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
        elif not isinstance(padding, PaddingStrategy):
            # Strings such as "max_length" are coerced into the enum.
            padding_strategy = PaddingStrategy(padding)
        elif isinstance(padding, PaddingStrategy):
            padding_strategy = padding
    else:
        padding_strategy = PaddingStrategy.DO_NOT_PAD

    # Set max length if needed
    if max_length is None:
        if padding_strategy == PaddingStrategy.MAX_LENGTH:
            raise ValueError(
                f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined'
            )

    # Test if we have a padding value
    if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
        raise ValueError(
            """Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
            """ as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."""
        )

    return padding_strategy
| 96 | from math import pow, sqrt
def validate(*values: float) -> bool:
    """Return True when at least one value is given and all values are > 0.

    Used by the Graham's-law helpers below to reject non-physical
    (zero or negative) molar masses and effusion rates.
    """
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result
def snake_case (__lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase )
else ValueError("Input Error: Molar mass values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
) | 670 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters for the ``YolosImageProcessor`` tests and
    computes the expected output size of resized images."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # list defaults kept from upstream; never mutated
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Keyword arguments used to construct the processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after shortest-edge resizing.

        With ``batched=True`` the per-axis maxima over the batch are returned,
        matching the processor's pad-to-largest behaviour.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ``YolosImageProcessor``: configuration handling, PIL/NumPy/
    PyTorch inputs, padding equivalence, and slow COCO integration checks."""

    # Class under test; None when the vision extras are unavailable.
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        """The processor exposes all of its configuration attributes."""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        """``from_dict`` honours both the stored dict and override kwargs."""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        """PIL input: single image and batch produce the expected shapes."""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        """NumPy input: single image and batch produce the expected shapes."""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        """PyTorch input: single image and batch produce the expected shapes."""
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        """Calling ``pad`` directly matches a transform-free processor call."""
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        """End-to-end check against reference values for COCO detection targets."""
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        """End-to-end check against reference values for COCO panoptic targets."""
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 97 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
# Module-level logger for this file.
# NOTE(review): unused in the visible code; upstream this variable is
# conventionally named ``logger`` — confirm before relying on the name.
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class LayoutLMvaFeatureExtractor(LayoutLMvaImageProcessor):
    """Deprecated feature-extractor alias that forwards to the image processor.

    Kept only for backward compatibility; emits a ``FutureWarning`` when
    instantiated and otherwise behaves exactly like the parent class.
    """

    def __init__(self, *args, **kwargs):
        # Point callers at the replacement class before delegating.
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase):
    """Builds tiny BeiT configs/inputs and checks Flax model output shapes."""

    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,  # accepted for API parity; not stored by this tester
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a small random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values, labels

    def create_and_check_model(self, config, pixel_values, labels):
        """Base model produces a (batch, seq, hidden) last hidden state."""
        model = FlaxBeitModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels):
        """Masked-image-modeling head predicts over all patch positions."""
        model = FlaxBeitForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Classification head yields per-class logits; also checks greyscale."""
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by the common test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxBeitModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Flax BeiT model tests: config, signature, JIT parity, and pretrained loading."""

    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxBeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        """Every model's ``__call__`` takes ``pixel_values`` first."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        """JIT-compiled and eager forward passes agree on output shapes."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """Pretrained checkpoints load and run a dummy forward pass."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("microsoft/beit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
def prepare_img():
    """Load the COCO cats fixture image used by the integration tests below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@require_flax
class FlaxBeitModelIntegrationTest(unittest.TestCase):
    """Slow integration tests that run real pretrained Flax BeiT checkpoints."""

    @cached_property
    def default_image_processor(self):
        """Image processor shared by the tests; None without vision extras."""
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="np").pixel_values

        # prepare bool_masked_pos
        bool_masked_pos = np.ones((1, 196), dtype=bool)

        # forward pass
        outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 196, 8192)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([-1.2385, -1.0987, -1.0108])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        # forward pass
        outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = (1, 21841)
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = np.array([1.6881, -0.2787, 0.5901])
        self.assertTrue(np.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
| 98 | from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    """Result of ``bwt_transform``: the transformed string plus the index of
    the original string in the sorted rotation table (needed to invert)."""

    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    """Return every cyclic rotation of ``s`` (including ``s`` itself).

    Raises:
        TypeError: if ``s`` is not a string.
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> "BWTTransformDict":
    """Compute the Burrows-Wheeler transform of ``s``.

    Returns a dict with ``bwt_string`` (last column of the sorted rotation
    table) and ``idx_original_string`` (row index of ``s`` in that table),
    which ``reverse_bwt`` needs to invert the transform.

    Raises:
        TypeError: if ``s`` is not a string.
        ValueError: if ``s`` is empty.
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    # Rotations built inline (equivalent to ``all_rotations``; the type check
    # above already guarantees ``s`` is a str).
    rotations = [s[i:] + s[:i] for i in range(len(s))]
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: "BWTTransformDict" = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert a Burrows-Wheeler transform.

    Args:
        bwt_string: the transformed string (last column of the rotation table).
        idx_original_string: row index of the original string in the sorted
            rotation table; anything castable to a non-negative int < len.

    Returns:
        The original string that produced ``bwt_string``.

    Raises:
        TypeError: if ``bwt_string`` is not a str, or the index is not castable to int.
        ValueError: if ``bwt_string`` is empty, or the index is out of range.
    """
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )

    # Rebuild the sorted rotation table one column at a time: prepending the
    # BWT column and re-sorting len(bwt_string) times reconstructs all rows.
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    # Interactive demo: read a string, show its Burrows-Wheeler transform,
    # then invert the transform to recover the original input.
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE = {
'AI-Sweden/gpt-sw3-126m': 2_0_4_8,
'AI-Sweden/gpt-sw3-350m': 2_0_4_8,
'AI-Sweden/gpt-sw3-1.6b': 2_0_4_8,
'AI-Sweden/gpt-sw3-6.7b': 2_0_4_8,
'AI-Sweden/gpt-sw3-20b': 2_0_4_8,
}
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self , __A , __A=False , __A=False , __A=False , __A=None , __A=None , __A=None , __A=None , __A = None , **__A , ):
__a = {} if sp_model_kwargs is None else sp_model_kwargs
__a = kwargs.get("""name_or_path""" )
if name_or_path is None:
logger.warning(
"""name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"""
""" you are testing the model, this can safely be ignored""" )
__a = """None"""
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
__a = """<|endoftext|>""" if eos_token is None else eos_token
__a = """<unk>""" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
__a = unk_token if pad_token is None else pad_token
__a = eos_token if bos_token is None else bos_token
else:
__a = """<pad>""" if pad_token is None else pad_token
__a = """<s>""" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A , remove_space=__A , keep_accents=__A , bos_token=__A , eos_token=__A , unk_token=__A , pad_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
__a = do_lower_case
__a = remove_space
__a = keep_accents
__a = vocab_file
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt : off
__a = {""" """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """ """, """""", """"""}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
__a = re.compile(
f'''[{''.join(map(__A , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]''' )
def __getstate__( self ):
__a = self.__dict__.copy()
__a = None
return state
def __setstate__( self , __A ):
__a = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__a = {}
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def snake_case_ ( self ):
return len(self.sp_model )
def snake_case_ ( self , __A ):
__a = self.non_printing_characters_re.sub("""""" , __A )
# Normalize whitespaces
__a = """""".join([char if char not in self.whitespaces else """ """ for char in text] )
# NFC Unicode normalization
__a = unicodedata.normalize("""NFC""" , __A )
return text
def snake_case_ ( self , __A , **__A ):
__a = self.preprocess_text(__A )
return self.sp_model.encode(__A , out_type=__A )
def snake_case_ ( self , __A ):
return self.sp_model.PieceToId(__A )
def snake_case_ ( self , __A ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def snake_case_ ( __A ):
return out_string
def snake_case_ ( self , __A ):
__a = []
__a = """"""
__a = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
__a = True
__a = []
else:
current_sub_tokens.append(__A )
__a = False
out_string += self.sp_model.decode(__A )
return out_string
def snake_case_ ( self ):
__a = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self , __A , __A = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , """wb""" ) as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def snake_case_ ( self , __A , __A = False ):
if isinstance(__A , __A ):
__a = self.preprocess_text(__A )
__a = self.sp_model.encode(__A )
else:
__a = [self.preprocess_text(__A ) for t in text]
__a = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
__a = torch.tensor(__A )
return token_ids
def snake_case_ ( self , __A ):
return self.sp_model.decode(__A )
def snake_case_ ( self , __A ):
__a = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
__a = (
f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(__A ) + f'''{self.bos_token}Bot:'''
)
return self.encode(text=__A )
| 99 | # NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
) | 670 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_A : Dict = 16
_A : Tuple = 32
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ = 1_6 ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__ = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(lowerCAmelCase_ ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowerCAmelCase_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ = 1_6
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ = 8
else:
SCREAMING_SNAKE_CASE__ = None
return tokenizer.pad(
lowerCAmelCase_ , padding='''longest''' , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors='''pt''' , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ = DataLoader(
tokenized_datasets['''train'''] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = DataLoader(
tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_A : Optional[Any] = mocked_dataloaders # noqa: F811
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , lowerCAmelCase_ ) == "1":
SCREAMING_SNAKE_CASE__ = 2
# Initialize accelerator
SCREAMING_SNAKE_CASE__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ = config['''lr''']
SCREAMING_SNAKE_CASE__ = int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE__ = int(config['''seed'''] )
SCREAMING_SNAKE_CASE__ = int(config['''batch_size'''] )
SCREAMING_SNAKE_CASE__ = evaluate.load('''glue''' , '''mrpc''' )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE__ = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE__ = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE__ = MAX_GPU_BATCH_SIZE
set_seed(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ = AdamW(params=model.parameters() , lr=lowerCAmelCase_ )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Now we train the model
for epoch in range(lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE__ = model(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = outputs.loss
SCREAMING_SNAKE_CASE__ = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
SCREAMING_SNAKE_CASE__ = 0
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((predictions, batch['''labels''']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(lowerCAmelCase_ ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
SCREAMING_SNAKE_CASE__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
SCREAMING_SNAKE_CASE__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , )
SCREAMING_SNAKE_CASE__ = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , lowerCAmelCase_ )
def __snake_case ( ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
SCREAMING_SNAKE_CASE__ = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(lowerCAmelCase_ , lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 100 | from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowercase_ :
_lowerCamelCase = LEDConfig
_lowerCamelCase = {}
_lowerCamelCase = 'gelu'
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=20 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=4 , ):
_snake_case : Optional[int] = parent
_snake_case : str = batch_size
_snake_case : int = seq_length
_snake_case : Dict = is_training
_snake_case : Optional[Any] = use_labels
_snake_case : Tuple = vocab_size
_snake_case : str = hidden_size
_snake_case : int = num_hidden_layers
_snake_case : Union[str, Any] = num_attention_heads
_snake_case : int = intermediate_size
_snake_case : List[str] = hidden_dropout_prob
_snake_case : List[Any] = attention_probs_dropout_prob
_snake_case : int = max_position_embeddings
_snake_case : Union[str, Any] = eos_token_id
_snake_case : str = pad_token_id
_snake_case : Any = bos_token_id
_snake_case : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_snake_case : List[Any] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_snake_case : List[str] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def UpperCamelCase ( self ):
_snake_case : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_snake_case : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
_snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_snake_case : Optional[Any] = prepare_led_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
_snake_case : int = tf.concat(
[tf.zeros_like(lowercase_ )[:, :-1], tf.ones_like(lowercase_ )[:, -1:]] , axis=-1 , )
_snake_case : List[Any] = global_attention_mask
return config, inputs_dict
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : Dict = TFLEDModel(config=lowercase_ ).get_decoder()
_snake_case : Optional[Any] = inputs_dict["input_ids"]
_snake_case : Optional[int] = input_ids[:1, :]
_snake_case : int = inputs_dict["attention_mask"][:1, :]
_snake_case : int = 1
# first forward pass
_snake_case : str = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
_snake_case ,_snake_case : Optional[int] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
_snake_case : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_snake_case : str = model(lowercase_ , attention_mask=lowercase_ )[0]
_snake_case : List[str] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_snake_case : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_snake_case : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> List[Any]:
'''simple docstring'''
if attention_mask is None:
_snake_case : int = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_snake_case : Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_snake_case : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_snake_case : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowercase_ ( __snake_case , __snake_case , unittest.TestCase ):
_lowerCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
_lowerCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
_lowerCamelCase = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = TFLEDModelTester(self )
_snake_case : List[Any] = ConfigTester(self , config_class=lowercase_ )
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
_snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def UpperCamelCase ( self ):
_snake_case ,_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Tuple = tf.zeros_like(inputs_dict["attention_mask"] )
_snake_case : Tuple = 2
_snake_case : Dict = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_snake_case : Tuple = True
_snake_case : Union[str, Any] = self.model_tester.seq_length
_snake_case : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(lowercase_ ):
_snake_case : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(lowercase_ ):
_snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
_snake_case : Optional[int] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_snake_case : Union[str, Any] = True
_snake_case : Dict = False
_snake_case : Any = False
_snake_case : Any = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
_snake_case : Tuple = len(lowercase_ )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
if self.is_encoder_decoder:
_snake_case : int = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_decoder_attentions_output(lowercase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_snake_case : List[Any] = True
_snake_case : Any = model_class(lowercase_ )
_snake_case : Optional[Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
# Check attention is always last and order is fine
_snake_case : Optional[int] = True
_snake_case : Optional[int] = True
_snake_case : List[Any] = model_class(lowercase_ )
_snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase_ ) )
self.assertEqual(model.config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
# TODO: Head-masking not yet implement
pass
def snake_case (__lowercase ) -> Optional[Any]:
'''simple docstring'''
return tf.constant(__lowercase , dtype=tf.intaa )
__SCREAMING_SNAKE_CASE : List[Any] = 1E-4
@slow
@require_tf
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Dict = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_snake_case : Union[str, Any] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Optional[int] = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Union[str, Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
_snake_case : Optional[Any] = model(**lowercase_ )[0]
_snake_case : str = (1, 1_024, 768)
self.assertEqual(output.shape , lowercase_ )
# change to expected output here
_snake_case : Optional[Any] = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 )
def UpperCamelCase ( self ):
_snake_case : List[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_snake_case : int = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : int = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
_snake_case : Optional[Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
_snake_case : Tuple = model(**lowercase_ )[0]
_snake_case : Any = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , lowercase_ )
# change to expected output here
_snake_case : Optional[int] = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 , rtol=1e-3 ) | 670 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
# Make transformers' own logging verbose for this example script: INFO level,
# with the library's default stream handler and explicit record formatting.
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def a__(train_file, eval_file, test_file, tokenizer, label_column_id, max_len=None):
    """Load CSV files, tokenize the text column(s), and wrap each split as a ``tf.data.Dataset``.

    Args:
        train_file: Path to the training CSV, or ``None`` to skip that split.
        eval_file: Path to the validation CSV, or ``None`` to skip that split.
        test_file: Path to the test CSV, or ``None`` to skip that split.
        tokenizer: Tokenizer providing ``model_input_names`` and ``batch_encode_plus``
            (assumed to be a transformers ``PreTrainedTokenizer`` — confirm at the caller).
        label_column_id: Index of the label column among the CSV's columns.
        max_len: Max sequence length passed to the tokenizer for padding/truncation
            (tokenizer default when ``None``).

    Returns:
        ``(train_ds, val_ds, test_ds, label2id)`` — each dataset is ``None`` when its
        file was not provided; ``label2id`` maps label values to integer ids.
    """
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    # Remove the label column; the remaining column(s) are the text inputs.
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    # One remaining column -> single-sentence task; two -> sentence-pair task.
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_len, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_len,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        # Yield (features_dict, label_id) pairs for tf.data.Dataset.from_generator.
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        # from_generator yields an unknown cardinality; assert it so len()/progress bars work.
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
# Module-level logger; main() reports the run configuration through it.
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments describing the input data: file paths, label column, and preprocessing.

    Consumed by ``HfArgumentParser`` in ``main()``, which turns each field into a CLI flag.
    """

    # Index of the CSV column that holds the label (required, no default).
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class __lowercase :
"""simple docstring"""
_UpperCAmelCase = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
_UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_UpperCAmelCase = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
_UpperCAmelCase = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
def a__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
F'''16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = get_tfds(
train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=A__, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
SCREAMING_SNAKE_CASE_ : int = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(A__ ), labelaid=A__, idalabel={id: label for label, id in labelaid.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE_ : Optional[int] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path ), config=A__, cache_dir=model_args.cache_dir, )
def compute_metrics(A__ ) -> Dict:
SCREAMING_SNAKE_CASE_ : Tuple = np.argmax(p.predictions, axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE_ : int = TFTrainer(
model=A__, args=A__, train_dataset=A__, eval_dataset=A__, compute_metrics=A__, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE_ : List[Any] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
SCREAMING_SNAKE_CASE_ : Dict = trainer.evaluate()
SCREAMING_SNAKE_CASE_ : int = os.path.join(training_args.output_dir, 'eval_results.txt' )
with open(A__, 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
results.update(A__ )
return results
if __name__ == "__main__":
main()
| 101 | import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowercase_ ( __snake_case , unittest.TestCase ):
_lowerCamelCase = ReformerTokenizer
_lowerCamelCase = ReformerTokenizerFast
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = True
def UpperCamelCase ( self ):
super().setUp()
_snake_case : Union[str, Any] = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : int = "<s>"
_snake_case : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowercase_ ) , 1_000 )
def UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def UpperCamelCase ( self ):
if not self.test_rust_tokenizer:
return
_snake_case : Tuple = self.get_tokenizer()
_snake_case : List[str] = self.get_rust_tokenizer()
_snake_case : int = "I was born in 92000, and this is falsé."
_snake_case : Tuple = tokenizer.tokenize(lowercase_ )
_snake_case : List[Any] = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
_snake_case : Tuple = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : Dict = self.get_rust_tokenizer()
_snake_case : List[Any] = tokenizer.encode(lowercase_ )
_snake_case : str = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self , lowercase_=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
# Simple input
_snake_case : List[str] = "This is a simple input"
_snake_case : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_snake_case : Union[str, Any] = ("This is a simple input", "This is a pair")
_snake_case : int = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
_snake_case : Dict = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
_snake_case : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] , )
_snake_case : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_snake_case : Any = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_snake_case : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase ( self ):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def UpperCamelCase ( self ):
_snake_case : int = "Hello World!"
_snake_case : Dict = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def UpperCamelCase ( self ):
_snake_case : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_snake_case : Dict = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@require_torch
@slow
def UpperCamelCase ( self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_snake_case : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
_snake_case : str = " ".join(lowercase_ )
_snake_case : Tuple = self.big_tokenizer.encode_plus(lowercase_ , return_tensors="pt" )
_snake_case : Tuple = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
_snake_case : int = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_snake_case : Union[str, Any] = encoded_sequence["input_ids"].shape
_snake_case : List[str] = ReformerModel(lowercase_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase_ )
model(**lowercase_ )
@slow
def UpperCamelCase ( self ):
# fmt: off
_snake_case : Union[str, Any] = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_snake_case : Tuple = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowercase_ , sequences=lowercase_ , ) | 670 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared SentencePiece fixture used to build small local tokenizers in the tests.
__magic_name__ : Union[str, Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
# shift_tokens_right is only needed by the torch-gated integration tests below.
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right
# Language-code token ids in the MBart-50 vocabulary.
# NOTE(review): both constants are bound to the same obfuscated name here, but the
# tests below reference EN_CODE (250004 == en_XX) and RO_CODE (250020 == ro_RO) —
# presumably these two assignments originally declared those names; confirm.
__magic_name__ : Tuple = 2_5_0_0_0_4
__magic_name__ : Tuple = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class lowercase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Unit tests for the slow and fast MBart-50 tokenizers.

    Mixes in the common tokenizer test suite (the obfuscated base name presumably
    resolves to ``TokenizerTesterMixin`` — confirm against the import block) and
    pins exact SentencePiece tokenizations / fairseq-offset token ids against the
    local ``test_sentencepiece.model`` fixture.

    NOTE(review): this file was mechanically renamed — assignments are bound to
    placeholder names (``UpperCamelCase``) while later statements reference the
    originally-intended names (``tokenizer``, ``vocab_keys``, ``tokenizer_r`` …),
    and every test method is called ``_a`` so later defs shadow earlier ones.
    As written the class would raise NameError at runtime; the original upstream
    test should be consulted before "fixing" any single site in isolation.
    """

    # Tokenizer classes under test and flags consumed by the common test mixin.
    __lowerCAmelCase : Any = MBartaaTokenizer
    __lowerCAmelCase : str = MBartaaTokenizerFast
    __lowerCAmelCase : Tuple = True
    __lowerCAmelCase : Optional[int] = True

    def _a ( self ):
        '''Build a slow tokenizer from the SentencePiece fixture and save it to tmpdir.'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        UpperCamelCase : Dict = MBartaaTokenizer(_A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=_A )
        tokenizer.save_pretrained(self.tmpdirname )

    def _a ( self ):
        '''Check the <s> token round-trips through id 0 in both directions.'''
        UpperCamelCase : Optional[int] = """<s>"""
        UpperCamelCase : int = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )

    def _a ( self ):
        '''Check vocab ordering (<s>, <pad> first; <mask> last) and total size 1054.'''
        UpperCamelCase : Dict = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(_A ) , 1_0_5_4 )

    def _a ( self ):
        '''Check the reported vocab_size matches the fixture vocabulary.'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4 )

    def _a ( self ):
        '''Pin exact subword splits and fairseq-offset ids, including accented and unknown chars.'''
        UpperCamelCase : Optional[Any] = MBartaaTokenizer(_A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=_A )
        UpperCamelCase : List[str] = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(_A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        UpperCamelCase : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            _A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
        UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(_A )
        self.assertListEqual(
            _A , [
                value + tokenizer.fairseq_offset
                # id 2 here is the underlying sp model's unk; offset shifts everything
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
            ] , )
        UpperCamelCase : str = tokenizer.convert_ids_to_tokens(_A )
        self.assertListEqual(
            _A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )

    @slow
    def _a ( self ):
        '''Integration check against the published facebook/mbart-large-50 checkpoint (byte-exact fixture).'''
        # fmt: off
        UpperCamelCase : Optional[int] = {"""input_ids""": [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_A , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )

    def _a ( self ):
        '''Check slow and fast tokenizers save/load identically, with and without legacy format.'''
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        UpperCamelCase : Any = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
                UpperCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained(_A , **_A )
                UpperCamelCase : Any = tempfile.mkdtemp()
                UpperCamelCase : List[str] = tokenizer_r.save_pretrained(_A )
                UpperCamelCase : Dict = tokenizer_p.save_pretrained(_A )
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                UpperCamelCase : Optional[Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
                self.assertSequenceEqual(_A , _A )
                # Checks everything loads correctly in the same way
                UpperCamelCase : Any = tokenizer_r.from_pretrained(_A )
                UpperCamelCase : Tuple = tokenizer_p.from_pretrained(_A )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(_A , _A ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(_A )
                # Save tokenizer rust, legacy_format=True
                UpperCamelCase : Any = tempfile.mkdtemp()
                UpperCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(_A , legacy_format=_A )
                UpperCamelCase : Optional[int] = tokenizer_p.save_pretrained(_A )
                # Checks it save with the same files
                self.assertSequenceEqual(_A , _A )
                # Checks everything loads correctly in the same way
                UpperCamelCase : Union[str, Any] = tokenizer_r.from_pretrained(_A )
                UpperCamelCase : Optional[int] = tokenizer_p.from_pretrained(_A )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(_A , _A ) )
                shutil.rmtree(_A )
                # Save tokenizer rust, legacy_format=False
                UpperCamelCase : Union[str, Any] = tempfile.mkdtemp()
                UpperCamelCase : List[str] = tokenizer_r.save_pretrained(_A , legacy_format=_A )
                UpperCamelCase : int = tokenizer_p.save_pretrained(_A )
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
                # Checks everything loads correctly in the same way
                UpperCamelCase : Dict = tokenizer_r.from_pretrained(_A )
                UpperCamelCase : Dict = tokenizer_p.from_pretrained(_A )
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(_A , _A ) )
                shutil.rmtree(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase ):
    """Integration tests for the facebook/mbart-large-50-one-to-many-mmt checkpoint.

    Exercises translation-specific behavior: language-code token ids, prefix/suffix
    special tokens, target-text tokenization, truncation, and
    ``shift_tokens_right`` interaction with padded batches (torch required).

    NOTE(review): same mechanical-rename damage as the class above — assignments
    bind placeholder names while assertions reference the intended names
    (``src_text``, ``ids``, ``batch``, ``targets``, ``new_tok`` …), and the
    classmethod below presumably was ``setUpClass``; consult upstream before
    repairing individual sites.
    """

    # Checkpoint under test, source/target fixture sentences, and the exact
    # expected source-side token ids (language code first, EOS id 2 last).
    __lowerCAmelCase : Any = """facebook/mbart-large-50-one-to-many-mmt"""
    __lowerCAmelCase : List[str] = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    __lowerCAmelCase : Optional[int] = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    __lowerCAmelCase : Optional[int] = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def _a ( cls ):
        '''Load the shared tokenizer once for the whole class (setUpClass-style).'''
        UpperCamelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
        # pad token id of the checkpoint is 1 — TODO confirm this was pad_token_id
        UpperCamelCase : str = 1
        return cls

    def _a ( self ):
        '''Pin the fairseq language-code ids (ar_AR/en_EN/ro_RO/mr_IN).'''
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 2_5_0_0_0_1 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 2_5_0_0_0_4 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 2_5_0_0_2_0 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 2_5_0_0_3_8 )

    def _a ( self ):
        '''Check encoding the first source sentence yields the expected token ids.'''
        UpperCamelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , _A )

    def _a ( self ):
        '''Check decoding: skipping special tokens drops the leading language code.'''
        self.assertIn(_A , self.tokenizer.all_special_ids )
        UpperCamelCase : Dict = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
        UpperCamelCase : int = self.tokenizer.decode(_A , skip_special_tokens=_A )
        UpperCamelCase : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_A )
        self.assertEqual(_A , _A )
        self.assertNotIn(self.tokenizer.eos_token , _A )

    def _a ( self ):
        '''Check truncation to max_length keeps the language code first and EOS (2) last.'''
        UpperCamelCase : Optional[Any] = ["""this is gunna be a long sentence """ * 2_0]
        assert isinstance(src_text[0] , _A )
        UpperCamelCase : List[str] = 1_0
        UpperCamelCase : int = self.tokenizer(_A , max_length=_A , truncation=_A ).input_ids[0]
        self.assertEqual(ids[0] , _A )
        self.assertEqual(ids[-1] , 2 )
        self.assertEqual(len(_A ) , _A )

    def _a ( self ):
        '''Pin ids of <mask> and the ar_AR language code.'''
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [2_5_0_0_5_3, 2_5_0_0_0_1] )

    def _a ( self ):
        '''Check the fairseq id mapping survives a save/reload round trip.'''
        UpperCamelCase : Dict = tempfile.mkdtemp()
        UpperCamelCase : List[str] = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(_A )
        UpperCamelCase : int = MBartaaTokenizer.from_pretrained(_A )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _A )

    @require_torch
    def _a ( self ):
        '''Check batch layout matches the fairseq reference (language code first, EOS last).'''
        UpperCamelCase : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_A , return_tensors="""pt""" )
        UpperCamelCase : List[str] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def _a ( self ):
        '''Check padded/truncated seq2seq batches: shapes, ids, and prefix/suffix tokens.'''
        UpperCamelCase : Tuple = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=_A , truncation=_A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
        UpperCamelCase : Any = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        self.assertIsInstance(_A , _A )
        self.assertEqual((2, 1_4) , batch.input_ids.shape )
        self.assertEqual((2, 1_4) , batch.attention_mask.shape )
        UpperCamelCase : Tuple = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , _A )
        self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )

    def _a ( self ):
        '''Check source and target texts can be truncated to different max lengths.'''
        UpperCamelCase : Union[str, Any] = self.tokenizer(self.src_text , padding=_A , truncation=_A , max_length=3 , return_tensors="""pt""" )
        UpperCamelCase : Optional[int] = self.tokenizer(
            text_target=self.tgt_text , padding=_A , truncation=_A , max_length=1_0 , return_tensors="""pt""" )
        UpperCamelCase : Any = targets["""input_ids"""]
        UpperCamelCase : Union[str, Any] = shift_tokens_right(_A , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )

    @require_torch
    def _a ( self ):
        '''Check _build_translation_inputs emits src-coded ids plus the target forced BOS id.'''
        UpperCamelCase : Any = self.tokenizer._build_translation_inputs(
            """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
        self.assertEqual(
            nested_simplify(_A ) , {
                # en_XX, A, test, EOS
                """input_ids""": [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]],
                """attention_mask""": [[1, 1, 1, 1]],
                # ar_AR
                """forced_bos_token_id""": 2_5_0_0_0_1,
            } , )
} , )
| 102 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Any = tempfile.mkdtemp()
# fmt: off
_snake_case : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_snake_case : Dict = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
_snake_case : Dict = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_snake_case : Optional[int] = {"unk_token": "<unk>"}
_snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
_snake_case : Any = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_snake_case : Optional[Any] = os.path.join(self.tmpdirname , lowercase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowercase_ , lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_snake_case : Union[str, Any] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self ):
_snake_case : Tuple = self.get_tokenizer()
_snake_case : Any = self.get_rust_tokenizer()
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Any = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_slow.save_pretrained(self.tmpdirname )
_snake_case : Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
_snake_case : List[Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_fast.save_pretrained(self.tmpdirname )
_snake_case : Optional[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase_ )
self.assertIsInstance(processor_fast.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : List[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_snake_case : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
_snake_case : Tuple = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
    def UpperCamelCase ( self ):
        # Verifies processor(images=...) produces the same tensors as calling the
        # image processor directly (per-key sums compared within 1e-2).
        # NOTE(review): assignments bind `_snake_case` while later lines read
        # `image_processor`, `processor`, `input_feat_extract`, `input_processor`
        # — obfuscation damage; verify against the original test.
        _snake_case : Union[str, Any] = self.get_image_processor()
        _snake_case : Any = self.get_tokenizer()
        _snake_case : int = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        _snake_case : Optional[int] = self.prepare_image_inputs()
        _snake_case : Optional[Any] = image_processor(lowercase_ , return_tensors="np" )
        _snake_case : str = processor(images=lowercase_ , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def UpperCamelCase ( self ):
        # Verifies processor(text=...) delegates to the tokenizer: both must emit
        # identical encodings for the same input string.
        # NOTE(review): assignments bind `_snake_case` while later lines read
        # `processor`, `tokenizer`, `encoded_processor`, `encoded_tok` —
        # obfuscation damage; verify against the original test.
        _snake_case : Optional[Any] = self.get_image_processor()
        _snake_case : Any = self.get_tokenizer()
        _snake_case : Dict = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        _snake_case : List[str] = "lower newer"
        _snake_case : int = processor(text=lowercase_ )
        _snake_case : str = tokenizer(lowercase_ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def UpperCamelCase ( self ):
        # Checks the joint text+image call returns the expected keys, and that
        # calling the processor with no inputs raises.
        # NOTE(review): assignments bind `_snake_case` while later lines read
        # `processor`/`inputs`; `pytest` must be imported at module level —
        # obfuscation damage; verify against the original test.
        _snake_case : List[Any] = self.get_image_processor()
        _snake_case : int = self.get_tokenizer()
        _snake_case : Tuple = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        _snake_case : List[Any] = "lower newer"
        _snake_case : int = self.prepare_image_inputs()
        _snake_case : Dict = processor(text=lowercase_ , images=lowercase_ )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(lowercase_ ):
            processor()
    def UpperCamelCase ( self ):
        # Checks the CLIPSeg-specific visual_prompt path: images + visual prompt
        # must yield pixel_values and conditional_pixel_values, and an empty call
        # must raise.
        # NOTE(review): assignments bind `_snake_case` while later lines read
        # `processor`/`inputs` — obfuscation damage; verify against the original.
        _snake_case : Dict = self.get_image_processor()
        _snake_case : List[str] = self.get_tokenizer()
        _snake_case : Union[str, Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        _snake_case : Optional[int] = self.prepare_image_inputs()
        _snake_case : Dict = self.prepare_image_inputs()
        _snake_case : List[Any] = processor(images=lowercase_ , visual_prompt=lowercase_ )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(lowercase_ ):
            processor()
    def UpperCamelCase ( self ):
        # Verifies processor.batch_decode() simply forwards to the tokenizer's
        # batch_decode for a batch of token-id sequences.
        # NOTE(review): assignments bind `_snake_case` while later lines read
        # `processor`, `tokenizer`, `decoded_processor`, `decoded_tok` —
        # obfuscation damage; verify against the original test.
        _snake_case : Dict = self.get_image_processor()
        _snake_case : List[Any] = self.get_tokenizer()
        _snake_case : str = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        _snake_case : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        _snake_case : Any = processor.batch_decode(lowercase_ )
        _snake_case : Any = tokenizer.batch_decode(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
snake_case = datasets.load_iris()
snake_case = np.array(data['''data'''])
snake_case = np.array(data['''target'''])
snake_case = data['''target_names''']
snake_case , snake_case , snake_case , snake_case = train_test_split(X, y)
def snake_case ( pointa , pointb ) -> float:
    """Return the Euclidean (L2) distance between two points.

    Args:
        pointa: first point, any sequence of numbers.
        pointb: second point of the same length.

    Returns:
        float: the L2 norm of ``pointa - pointb``.
    """
    # Fix: the original signature declared the same parameter name twice (a
    # SyntaxError) and annotated the return as str; the distance is a float.
    return np.linalg.norm(np.array(pointa ) - np.array(pointb ) )
def snake_case ( train_data , train_target , classes , point , k=5 ):
    """Classify ``point`` with a k-nearest-neighbours majority vote.

    Args:
        train_data: iterable of training feature vectors.
        train_target: iterable of integer class indices, parallel to train_data.
        classes: sequence mapping a class index to its display name.
        point: the feature vector to classify.
        k: number of neighbours that vote (default 5).

    Returns:
        The entry of ``classes`` for the majority class among the k nearest
        training points.
    """
    # Fix: the original signature repeated one parameter name five times (a
    # SyntaxError) and the distance helper it called was not defined under that
    # name, so the distance is computed inline here.
    data = zip(train_data , train_target )
    # List of (distance, label) pairs of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = float(np.linalg.norm(np.array(data_point[0] ) - np.array(point ) ) )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances (sorting pairs orders by
    # distance first).
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them is the class into which the
    # point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]


if __name__ == "__main__":
    print(snake_case(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def try_infer_format_from_ext (path ) -> str:
    '''Infer the pipeline data format from a file extension.

    Returns "pipe" for an empty/None path (stdin/stdout piping), otherwise the
    first supported extension that ``path`` ends with.

    Raises:
        Exception: if no supported extension matches.
    '''
    # Fix: the obfuscated def discarded this name, but the factory below calls
    # `try_infer_format_from_ext`, and the body read `path` which no parameter
    # provided.
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
    raise Exception(
        F"""Unable to determine file format from file extension {path}. """
        F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def snake_case (args ) -> "lowercase_":
    '''Factory for the `run` CLI sub-command.

    Builds a pipeline and a matching data reader from the parsed CLI ``args``
    and wraps them in the run command class below.
    '''
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    # Infer the data format from the input file extension unless given explicitly.
    format = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    # `lowercase_` is the (obfuscated) RunCommand class defined below.
    return lowercase_(nlp , reader )
class lowercase_ ( __snake_case ):
    """CLI command that streams records from a data reader through a pipeline
    and saves the outputs (as text or binary, depending on the pipeline)."""

    def __init__( self , nlp , reader ):
        # Fix: the original bound the constructor arguments to throwaway locals,
        # leaving the attributes read by run() unset.
        self._nlp = nlp          # the inference pipeline
        self._reader = reader    # PipelineDataFormat feeding/saving records

    @staticmethod
    def register_subcommand( parser ):
        """Attach the `run` sub-command and its arguments to ``parser``."""
        run_parser = parser.add_parser("run" , help="Run a pipeline through the CLI" )
        run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
        run_parser.add_argument("--input" , type=str , help="Path to the file to use for inference" )
        run_parser.add_argument("--output" , type=str , help="Path to the file that will be used post to write results." )
        run_parser.add_argument("--model" , type=str , help="Name or path to the model to instantiate." )
        run_parser.add_argument("--config" , type=str , help="Name or path to the model's config to instantiate." )
        run_parser.add_argument(
            "--tokenizer" , type=str , help="Name of the tokenizer to use. (default: same as the model name)" )
        run_parser.add_argument(
            "--column" , type=str , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
        run_parser.add_argument(
            "--format" , type=str , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
        run_parser.add_argument(
            "--device" , type=int , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
        run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
        # `snake_case` is the module-level factory defined above.
        run_parser.set_defaults(func=snake_case )

    def run( self ):
        """Run the pipeline over every record of the reader and persist results."""
        nlp , outputs = self._nlp, []
        for entry in self._reader:
            # Multi-column readers yield dicts of keyword arguments.
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
        else:
            self._reader.save(outputs )
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase__ ( _lowerCAmelCase ):
    """Zero-shot text-classification tool backed by an NLI model
    (bart-large-mnli): the label whose 'entailment' logit is highest wins."""

    # Fix: the original collapsed every class attribute onto one name (`A__`),
    # so only the last assignment survived; these are the attribute names the
    # PipelineTool base class reads.
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self ):
        """Load model/tokenizer, then locate the 'entailment' class id in the config."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        # id2label maps class index -> label name; bart-large-mnli uses
        # "entailment" for index 2.
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )

    def encode(self , text , labels ):
        """Build one NLI premise/hypothesis pair per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [f"""This example is {label}""" for label in labels] , return_tensors="pt" , padding="max_length" , )

    def decode(self , outputs ):
        """Return the label whose entailment logit is largest."""
        logits = outputs.logits
        # Use the entailment column located in setup() rather than hardcoding 2.
        label_id = torch.argmax(logits[:, self.entailment_id] ).item()
        return self._labels[label_id]
| 104 | import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
    r"""Wraps several ControlNetModels so they act as one: each net processes its
    own conditioning image/scale and the residuals are summed."""

    def __init__( self , controlnets ):
        super().__init__()
        # ModuleList so child controlnets are registered/saved as submodules.
        self.nets = nn.ModuleList(controlnets )

    def forward( self , sample , timestep , encoder_hidden_states , controlnet_cond , conditioning_scale , class_labels = None , timestep_cond = None , attention_mask = None , cross_attention_kwargs = None , guess_mode = False , return_dict = True , ):
        """Run every controlnet on its (image, scale) pair and merge residuals by summation."""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples , mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )
            # merge samples
            if i == 0:
                down_block_res_samples , mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def save_pretrained( self , save_directory , is_main_process = True , save_function = None , safe_serialization = False , variant = None , ):
        """Save each child controlnet under save_directory, save_directory_1, ..."""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + f"""_{idx}"""

    @classmethod
    def from_pretrained( cls , pretrained_model_path , **kwargs ):
        """Load controlnets from pretrained_model_path, _1, _2, ... until a directory is missing."""
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + f"""_{idx}"""
        logger.info(f"""{len(controlnets )} controlnets loaded from {pretrained_model_path}.""" )
        if len(controlnets ) == 0:
            raise ValueError(
                f"""No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}.""" )
        return cls(controlnets )
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
UpperCamelCase__ : Dict = logging.getLogger(__name__)
class lowerCAmelCase_ ( lowerCamelCase_ ):
    """CoNLL-style NER task: one `token label...` pair per line, blank lines or
    -DOCSTART- markers separating sentences; the label lives in column
    ``label_idx`` of each line."""

    def __init__( self ,label_idx=-1 ):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file( self ,data_dir ,mode ):
        """Parse ``{data_dir}/{mode}.txt`` into a list of InputExample objects."""
        if isinstance(mode ,Split ):
            mode = mode.value
        file_path = os.path.join(data_dir ,F'{mode}.txt' )
        guid_index = 1
        examples = []
        with open(file_path ,encoding='utf-8' ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                    # Sentence boundary: flush accumulated tokens as one example.
                    if words:
                        examples.append(InputExample(guid=F'{mode}-{guid_index}' ,words=words ,labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ' )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace('\n' ,'' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O' )
            if words:
                # Flush the trailing sentence if the file lacks a final blank line.
                examples.append(InputExample(guid=F'{mode}-{guid_index}' ,words=words ,labels=labels ) )
        return examples

    def write_predictions_to_file( self ,writer ,test_input_reader ,preds_list ):
        """Copy the test file to ``writer``, replacing gold labels with predictions."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(output_line )
            else:
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' ,line.split()[0] )

    def get_labels( self ,path ):
        """Read label names from ``path`` (one per line, 'O' prepended if absent),
        else return the CoNLL-2003 defaults."""
        if path:
            with open(path ,'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowerCAmelCase_ ( lowerCamelCase_ ):
    """Chunking task variant of the CoNLL reader: same file format, but the
    label is taken from the second-to-last column."""

    def __init__( self ):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2 )

    def get_labels( self ,path ):
        """Read chunk labels from ``path``, else return the default CoNLL chunk tag set."""
        if path:
            with open(path ,'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class lowerCAmelCase_ ( lowerCamelCase_ ):
    """Part-of-speech tagging task reading CoNLL-U files via ``conllu.parse_incr``."""

    def read_examples_from_file( self ,data_dir ,mode ):
        """Parse ``{data_dir}/{mode}.txt`` (CoNLL-U) into InputExample objects."""
        if isinstance(mode ,Split ):
            mode = mode.value
        file_path = os.path.join(data_dir ,F'{mode}.txt' )
        guid_index = 1
        examples = []
        with open(file_path ,encoding='utf-8' ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['form'] )
                    labels.append(token['upos'] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=F'{mode}-{guid_index}' ,words=words ,labels=labels ) )
                    guid_index += 1
        return examples

    def write_predictions_to_file( self ,writer ,test_input_reader ,preds_list ):
        """Write `form (gold|pred)` triples per sentence to ``writer``."""
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = ''
            for token in sentence:
                out += F'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
            out += "\n"
            writer.write(out )
            example_id += 1

    def get_labels( self ,path ):
        """Read labels from ``path``, else return the universal POS tag set."""
        if path:
            with open(path ,'r' ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 105 | import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( __snake_case ):
    """Processor wrapping a CLIP image processor and an XLM-Roberta tokenizer
    into a single callable (AltCLIP-style)."""

    # Fix: the original collapsed the three ProcessorMixin class attributes onto
    # one name; these are the names the mixin reads.
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Legacy kwarg kept for backward compatibility.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Tokenize ``text`` and/or preprocess ``images``; when both are given the
        pixel values are attached to the text encoding."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        # Union of tokenizer and image-processor input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__snake_case :str =False
class lowerCAmelCase__ ( unittest.TestCase ):
    # Placeholder for the fast test suite; real coverage lives in the nightly
    # GPU class below.
    pass
@nightly
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    # Nightly integration tests for the VersatileDiffusion pipeline (GPU only).
    # NOTE(review): throughout this class, assignments bind the throwaway name
    # `A` while later lines read `pipe`, `image`, `generator`, `image_slice`,
    # `expected_slice`, `new_image` — mechanical renaming appears to have broken
    # these tests; verify against the original test module.

    def __UpperCamelCase ( self : Dict ) -> List[Any]:
        # clean up the VRAM after each test
        # NOTE(review): obfuscated name — plays the role of unittest's tearDown
        # (it calls super().tearDown()); confirm against the original.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
        # Dual-guided generation must be reproducible after save/load round-trip.
        A = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
        pipe.to(__UpperCamelCase )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        A = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        A = torch.manual_seed(0 )
        A = pipe.dual_guided(
            prompt='first prompt' , image=__UpperCamelCase , text_to_image_strength=0.7_5 , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(__UpperCamelCase )
            A = VersatileDiffusionPipeline.from_pretrained(__UpperCamelCase , torch_dtype=torch.floataa )
        pipe.to(__UpperCamelCase )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        A = generator.manual_seed(0 )
        A = pipe.dual_guided(
            prompt='first prompt' , image=__UpperCamelCase , text_to_image_strength=0.7_5 , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"

    def __UpperCamelCase ( self : Tuple ) -> List[str]:
        # Exercises all three inference modes (dual_guided, text_to_image,
        # image_variation) against reference output slices.
        A = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
        pipe.to(__UpperCamelCase )
        pipe.set_progress_bar_config(disable=__UpperCamelCase )
        A = 'cyberpunk 2077'
        A = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        A = torch.manual_seed(0 )
        A = pipe.dual_guided(
            prompt=__UpperCamelCase , image=__UpperCamelCase , text_to_image_strength=0.7_5 , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
        A = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        A = 'A painting of a squirrel eating a burger '
        A = torch.manual_seed(0 )
        A = pipe.text_to_image(
            prompt=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
        A = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        A = pipe.image_variation(__UpperCamelCase , generator=__UpperCamelCase , output_type='numpy' ).images
        A = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def snake_case (voltage , current , resistance ) -> dict[str, float]:
    '''Apply Ohm's law to solve for the one quantity given as 0.

    Exactly one of ``voltage``, ``current``, ``resistance`` must be 0; the
    other two are used to compute it (V = I * R).

    Returns:
        dict mapping the solved quantity's name to its value.

    Raises:
        ValueError: if not exactly one argument is 0, or resistance is negative.
    '''
    # Fix: the original signature declared the same parameter name three times
    # (a SyntaxError) while the body already read these names.
    if (voltage, current, resistance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if resistance < 0:
        raise ValueError("Resistance cannot be negative" )
    if voltage == 0:
        return {"voltage": float(current * resistance )}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
# Google the command-line arguments and open the top result links in a browser.
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup  # fix: the original imported from the typo module "bsa"
from fake_useragent import UserAgent

if __name__ == "__main__":
    print('''Googling.....''')
    url = '''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:])
    # Random user agent reduces the chance of the request being blocked.
    res = requests.get(url, headers={'''UserAgent''': UserAgent().random})
    # res.raise_for_status()
    with open('''project1a.html''', '''wb''') as out_file:  # only for knowing the class
        for data in res.iter_content(1_00_00):
            out_file.write(data)
    soup = BeautifulSoup(res.text, '''html.parser''')
    # `.eZt8xd` is the CSS class Google uses for result links; keep only 5.
    links = list(soup.select('''.eZt8xd'''))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('''href'''))
        else:
            webbrowser.open(F'''https://google.com{link.get('href')}''')
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def snake_case (*objects ) -> list:
    '''Drop references to ``objects``, run GC, and empty the accelerator cache.

    Returns:
        A list of the same length as ``objects`` with every slot set to None,
        intended to be unpacked back over the caller's variables.
    '''
    # *args arrive as a tuple; convert so slots can be overwritten in place.
    if not isinstance(objects , list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        objects[i] = None
    gc.collect()
    # Empty the cache of whichever accelerator backend is present.
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def snake_case (exception ) -> bool:
    '''Return True if ``exception`` is a single-message RuntimeError whose text
    matches a known out-of-memory pattern (CUDA, cuDNN, or CPU allocator).
    '''
    _statements = [
        "CUDA out of memory.", # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory", # CPU OOM
    ]
    # Only RuntimeErrors with exactly one message argument are inspected.
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
    return False
def snake_case (function = None , starting_batch_size = 128 ) -> Any:
    '''Decorator that retries ``function``, halving the batch size on OOM.

    ``function`` must take the batch size as its first argument; the decorator
    supplies it, starting at ``starting_batch_size`` and halving after every
    out-of-memory failure until the call succeeds or the size reaches zero.

    Raises (from the wrapper):
        RuntimeError: when no executable batch size remains.
        TypeError: if the caller also passes a batch size explicitly.
    '''
    # Fix: the original signature declared the same parameter name twice (a
    # SyntaxError) and never bound the locals the body read.
    if function is None:
        # Used as @snake_case(starting_batch_size=...) — return a partial.
        return functools.partial(snake_case , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size

    def decorator(*args , **kwargs ):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
        # Guard against user error: the batch size is injected by the decorator.
        if len(params ) < (len(args ) + 1):
            arg_str = ", ".join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
            raise TypeError(
                F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
                F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero." )
            try:
                return function(batch_size , *args , **kwargs )
            except Exception as e:
                # Relies on the module's should_reduce_batch_size OOM detector.
                if should_reduce_batch_size(e ):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__a: Union[str, Any] = logging.get_logger(__name__)
__a: Optional[int] = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
    '''Configuration class for GPT-Neo models: stores model dimensions, the
    per-layer attention schedule, dropout rates, and generation token ids.'''

    # Fix: the original collapsed the three PretrainedConfig class attributes
    # onto one name; these are the names the base class machinery reads.
    model_type = '''gpt_neo'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}

    def __init__( self , vocab_size=5_0257 , max_position_embeddings=2048 , hidden_size=2048 , num_layers=24 , attention_types=[[["global", "local"], 12]] , num_heads=16 , intermediate_size=None , window_size=256 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        # Expand [["global","local"], 12] into a flat per-layer attention list.
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.attention_layers)` == `config.num_layers` """
                f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
                f"""`config.num_layers = {self.num_layers}`. """
                """`config.attention_layers` is prepared using `config.attention_types`. """
                """Please verify the value of `config.attention_types` argument.""" )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

    @staticmethod
    def expand_attention_types_params( attention_types ):
        """Repeat each [patterns, count] pair into a flat list of layer types."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case , __snake_case ) -> Dict:
import torch
_UpperCAmelCase = input.size()
_UpperCAmelCase = len(__snake_case )
_UpperCAmelCase = shape[dimension]
_UpperCAmelCase = torch.arange(0 , __snake_case , __snake_case )
_UpperCAmelCase = torch.div(sizedim - size , __snake_case , rounding_mode="""floor""" ) + 1
_UpperCAmelCase = torch.arange(__snake_case ) + low_indices[:min_length][:, None]
_UpperCAmelCase = [slice(__snake_case )] * rank
_UpperCAmelCase = indices
_UpperCAmelCase = input[s]
_UpperCAmelCase = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(__snake_case )
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> Optional[int]:
import torch
_UpperCAmelCase = torch.arange(1 , __snake_case )
_UpperCAmelCase = torch.remainder(__snake_case , __snake_case )
_UpperCAmelCase = remainders == 0
_UpperCAmelCase = candidates[divisor_indices]
_UpperCAmelCase = torch.max(__snake_case )
return largest_divisor, torch.div(__snake_case , __snake_case , rounding_mode="""floor""" )
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
    '''ONNX export configuration for GPT-Neo (OnnxConfigWithPast): declares the
    dynamic-axis input spec and builds dummy inputs, including past key/values
    when exporting with cache.'''

    @property
    def inputs(self ) -> "Mapping[str, Mapping[int, str]]":
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
        if self.use_past:
            # Past key/values extend the attention mask's sequence axis.
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}
        return common_inputs

    @property
    def num_attention_heads(self ) -> int:
        # GPT-Neo stores the head count under `num_heads`.
        return self._config.num_heads

    def generate_dummy_inputs(self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> "Mapping[str, Any]":
        """Build dummy export inputs, inserting zeroed past key/values and an
        extended attention mask when ``use_past`` is set."""
        common_inputs = super(SCREAMING_SNAKE_CASE__ , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch

                batch , seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            # Ones over the past positions so they are attended to.
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs

    @property
    def default_onnx_opset(self ) -> int:
        return 13
# Baconian cipher lookup table: each letter maps to a 5-character A/B group;
# spaces pass through unchanged. NOTE: this variant gives 'j' and 'v' their own
# codes ('BBBAA' / 'BBBAB') instead of sharing with 'i'/'u' as in the classic
# 24-letter cipher. Fix: the original assignment lost its name (the functions
# below reference `encode_dict` / `decode_dict`).
encode_dict = {
    "a": "AAAAA",
    "b": "AAAAB",
    "c": "AAABA",
    "d": "AAABB",
    "e": "AABAA",
    "f": "AABAB",
    "g": "AABBA",
    "h": "AABBB",
    "i": "ABAAA",
    "j": "BBBAA",
    "k": "ABAAB",
    "l": "ABABA",
    "m": "ABABB",
    "n": "ABBAA",
    "o": "ABBAB",
    "p": "ABBBA",
    "q": "ABBBB",
    "r": "BAAAA",
    "s": "BAAAB",
    "t": "BAABA",
    "u": "BAABB",
    "v": "BBBAB",
    "w": "BABAA",
    "x": "BABAB",
    "y": "BABBA",
    "z": "BABBB",
    " ": " ",
}
# Inverse table used by decode().
decode_dict = {value: key for key, value in encode_dict.items()}
def snake_case(word: str) -> str:
    """Encode `word` (letters and spaces only) with the Baconian cipher.

    Raises:
        Exception: if `word` contains anything other than letters/spaces.
    Fix: the accumulator was assigned to a mangled name while the loop
    appended to the undefined name ``encoded``; the parameter was likewise
    mangled while the body read ``word``.
    """
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def snake_case(coded: str) -> str:
    """Decode a Baconian-cipher string of 'A'/'B' groups separated by spaces.

    Raises:
        Exception: if `coded` contains characters other than 'A', 'B', space.
    Fix: the accumulator and the 5-character window advance were assigned to
    mangled names, leaving ``decoded``/``word`` undefined.
    """
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        # Consume the word five characters (one letter) at a time.
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
# Run the module doctests when executed as a script.
# Fix: stray ' | 670 | 0 |' row-separator junk was fused onto the last line.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
'''simple docstring'''
from __future__ import annotations
def __magic_name__(matrix: list[list[int]]) -> int:
    """Minimum-cost path from top-left to bottom-right, moving only right/down.

    Mutates `matrix` in place (classic DP over cumulative costs) and returns
    the cost stored in the bottom-right cell.
    Fix: the parameter was mangled while the body indexed the undefined
    name ``matrix``.
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
# Script entry point: execute the module's doctests.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 109 | import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
_snake_case : List[Any] = "A painting of a squirrel eating a burger"
_snake_case : Union[str, Any] = jax.device_count()
_snake_case : List[Any] = num_samples * [prompt]
_snake_case : Tuple = sd_pipe.prepare_inputs(lowercase_ )
_snake_case : str = replicate(lowercase_ )
_snake_case : Dict = shard(lowercase_ )
_snake_case : List[Any] = jax.random.PRNGKey(0 )
_snake_case : List[Any] = jax.random.split(lowercase_ , jax.device_count() )
_snake_case : Tuple = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case : str = images[0, 253:256, 253:256, -1]
_snake_case : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case : Optional[Any] = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = "stabilityai/stable-diffusion-2"
_snake_case ,_snake_case : List[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase_ , subfolder="scheduler" )
_snake_case ,_snake_case : int = FlaxStableDiffusionPipeline.from_pretrained(
lowercase_ , scheduler=lowercase_ , revision="bf16" , dtype=jnp.bfloataa , )
_snake_case : str = scheduler_params
_snake_case : Dict = "A painting of a squirrel eating a burger"
_snake_case : Dict = jax.device_count()
_snake_case : Optional[int] = num_samples * [prompt]
_snake_case : List[str] = sd_pipe.prepare_inputs(lowercase_ )
_snake_case : Optional[int] = replicate(lowercase_ )
_snake_case : Union[str, Any] = shard(lowercase_ )
_snake_case : List[Any] = jax.random.PRNGKey(0 )
_snake_case : Union[str, Any] = jax.random.split(lowercase_ , jax.device_count() )
_snake_case : str = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case : List[str] = images[0, 253:256, 253:256, -1]
_snake_case : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case : Dict = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 | 670 | 0 |
'''simple docstring'''
import math
import string

import numpy
def snake_case__(a: int, b: int) -> int:
    """Greatest common divisor of `a` and `b` (recursive Euclid).

    Fix: the original declared two parameters with the same name (a
    SyntaxError) and recursed into the undefined name
    ``greatest_common_divisor``.
    """
    return b if a == 0 else snake_case__(b % a, a)
class UpperCAmelCase:
    """Hill cipher over the 36-character alphabet A-Z0-9 (arithmetic mod 36).

    Fix: in the original, every method name had collapsed to ``_lowercase``
    (later defs clobbered earlier ones) while the bodies called the real
    names (``self.replace_letters`` ...); locals and the vectorized lambdas
    were likewise mangled. Method names restored to the ones the code itself
    references.
    """

    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key) -> None:
        """`encrypt_key`: square integer numpy matrix, invertible mod 36."""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        """Map a key-string character to its numeric value."""
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        """Map a (possibly float) numeric value back to a key-string character."""
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        """Raise ValueError unless det(key) is coprime with 36 (i.e. invertible)."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if math.gcd(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        """Strip characters outside the alphabet and pad to a break_key multiple."""
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        """Encrypt `text` block-by-block: c = K @ v (mod 36)."""
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self):
        """Build the modular inverse of the key matrix (mod 36)."""
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        # Brute-force the modular inverse of the determinant.
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        """Decrypt `text` block-by-block with the inverse key matrix."""
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def snake_case__() -> None:
    """Interactive CLI: read a key matrix, then encrypt or decrypt user text.

    Fix: locals were mangled (``n``, ``row``, ``hc``, ``option`` undefined) and
    the cipher class was referenced by a name that does not exist in this file.
    """
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = UpperCAmelCase(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
# Script entry: run doctests, then the interactive Hill-cipher CLI.
# Fix: the original called the undefined name `main()`; the CLI function in
# this file is `snake_case__`.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    snake_case__()
| 683 | from manim import *
class lowercase_ ( __snake_case ):
    """Manim scene animating a sharded checkpoint being loaded into CPU memory.

    NOTE(review): identifiers were machine-mangled — locals are `_snake_case`
    and positional arguments (including the manim direction constants passed
    to `.arrange(...)` / `.next_to(...)`) became `lowercase_`, so the scene
    cannot render as written. The only code change made here is removing
    stray row-separator junk fused onto the last line (a SyntaxError).
    """

    def UpperCamelCase ( self ):
        # Base memory-cell rectangles used to build the CPU/GPU/model groups.
        _snake_case : Tuple = Rectangle(height=0.5 , width=0.5 )
        _snake_case : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        _snake_case : List[str] = [mem.copy() for i in range(6 )]
        _snake_case : Any = [mem.copy() for i in range(6 )]
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : str = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : int = Text("CPU" , font_size=24 )
        _snake_case : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(lowercase_ )
        # GPU block.
        _snake_case : int = [mem.copy() for i in range(4 )]
        _snake_case : Dict = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : str = Text("GPU" , font_size=24 )
        _snake_case : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        gpu.move_to([-1, -1, 0] )
        self.add(lowercase_ )
        # Empty model block.
        _snake_case : Any = [mem.copy() for i in range(6 )]
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Dict = Text("Model" , font_size=24 )
        _snake_case : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        model.move_to([3, -1.0, 0] )
        self.add(lowercase_ )
        # Highlight targets over the model cells.
        _snake_case : str = []
        for i, rect in enumerate(lowercase_ ):
            rect.set_stroke(lowercase_ )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            _snake_case : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
            self.add(lowercase_ )
            cpu_targs.append(lowercase_ )
        # Loaded-checkpoint block plus legend key.
        _snake_case : List[Any] = [mem.copy() for i in range(6 )]
        _snake_case : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Optional[Any] = Text("Loaded Checkpoint" , font_size=24 )
        _snake_case : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        _snake_case : Optional[int] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        _snake_case : Optional[Any] = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(lowercase_ , lowercase_ )
        _snake_case : Union[str, Any] = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        _snake_case : List[Any] = MarkupText(
            f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(lowercase_ ) , Write(lowercase_ ) )
        self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
        # Animate each checkpoint shard moving into a CPU cell.
        _snake_case : int = []
        _snake_case : str = []
        for i, rect in enumerate(lowercase_ ):
            _snake_case : Dict = fill.copy().set_fill(lowercase_ , opacity=0.7 )
            target.move_to(lowercase_ )
            first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
            _snake_case : Dict = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
        self.play(*lowercase_ )
        self.play(*lowercase_ )
        self.wait()
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
# Shared eval-time preprocessing: resize to 256x256, convert to a tensor in
# [0, 1], then normalize to [-1, 1].
# NOTE(review): the constant name was mangled to `A`; the `int` annotation was
# wrong (the value is a torchvision Compose).
A : transforms.Compose = transforms.Compose(
    [
        transforms.Resize((2_5_6, 2_5_6)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)
def _lowerCamelCase(image):
    """Coerce `image` (tensor, PIL image, or list of PIL images) to a batched tensor.

    Tensors pass through unchanged; PIL input is converted to RGB, run through
    the module-level transform `A`, and stacked along a new batch dimension.
    Fix: the parameter was mangled while the body read ``image``, and the
    transform was referenced by the undefined name ``trans``.
    """
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [A(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class _UpperCamelCase ( __snake_case ):
    """Image-to-image DDIM diffusion pipeline (noise an input image at a chosen
    strength, then denoise it for the remaining timesteps).

    NOTE(review): locals in this class were machine-mangled (assigned to
    `__lowerCAmelCase`, used under their original names) and call arguments
    became `lowercase_`, so the methods do not run as written. The structure
    below mirrors a standard diffusers img2img pipeline; code left untouched.
    """
    def __init__( self , __a , __a ):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        __lowerCAmelCase = DDIMScheduler.from_config(scheduler.config )

        self.register_modules(unet=lowercase_ , scheduler=lowercase_ )

    def snake_case ( self , __a ):
        # Validate the img2img strength is a fraction in [0, 1].
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}" )

    def snake_case ( self , __a , __a , __a ):
        # get the original timestep using init_timestep
        __lowerCAmelCase = min(int(num_inference_steps * strength ) , lowercase_ )

        __lowerCAmelCase = max(num_inference_steps - init_timestep , 0 )
        __lowerCAmelCase = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def snake_case ( self , __a , __a , __a , __a , __a , __a=None ):
        # Encode the input image as noised latents at the start timestep.
        if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}" )

        __lowerCAmelCase = image.to(device=lowercase_ , dtype=lowercase_ )

        if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators." )

        __lowerCAmelCase = init_latents.shape
        __lowerCAmelCase = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )

        # get latents
        print("add noise to latents at timestep" , lowercase_ )
        __lowerCAmelCase = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
        __lowerCAmelCase = init_latents

        return latents

    @torch.no_grad()
    def __call__( self , __a = None , __a = 0.8 , __a = 1 , __a = None , __a = 0.0 , __a = 50 , __a = None , __a = "pil" , __a = True , ):
        self.check_inputs(lowercase_ )

        # 2. Preprocess image
        __lowerCAmelCase = preprocess(lowercase_ )

        # 3. set timesteps
        self.scheduler.set_timesteps(lowercase_ , device=self.device )
        __lowerCAmelCase = self.get_timesteps(lowercase_ , lowercase_ , self.device )
        __lowerCAmelCase = timesteps[:1].repeat(lowercase_ )

        # 4. Prepare latent variables
        __lowerCAmelCase = self.prepare_latents(lowercase_ , lowercase_ , lowercase_ , self.unet.dtype , self.device , lowercase_ )
        __lowerCAmelCase = latents

        # 5. Denoising loop
        for t in self.progress_bar(lowercase_ ):
            # 1. predict noise model_output
            __lowerCAmelCase = self.unet(lowercase_ , lowercase_ ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            __lowerCAmelCase = self.scheduler.step(
                lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_ , ).prev_sample

        __lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
        __lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            __lowerCAmelCase = self.numpy_to_pil(lowercase_ )

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=lowercase_ )
| 636 | import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
# Module-level logger for this scheduler module.
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)


# String-valued enum of the supported learning-rate schedule names
# (SchedulerType-style).
# NOTE(review): the base class name was mangled to `__snake_case` (undefined
# here) and every member name collapsed to `_lowerCamelCase`, so only the last
# assignment survives as written — the member identifiers need restoring.
class lowercase_ ( __snake_case ):
    _lowerCamelCase = 'linear'
    _lowerCamelCase = 'cosine'
    _lowerCamelCase = 'cosine_with_restarts'
    _lowerCamelCase = 'polynomial'
    _lowerCamelCase = 'constant'
    _lowerCamelCase = 'constant_with_warmup'
    _lowerCamelCase = 'piecewise_constant'
def snake_case(optimizer, last_epoch=-1):
    """Schedule with a constant learning rate (multiplier fixed at 1).

    Fix: the original declared both parameters with the same name, which is a
    SyntaxError in Python.
    """
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def snake_case(optimizer, num_warmup_steps, last_epoch=-1):
    """Constant schedule after a linear warmup over `num_warmup_steps` steps.

    Fix: duplicate parameter names (SyntaxError) and an inner lambda that read
    the undefined names `current_step` / `num_warmup_steps`.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            # Linear ramp from 0 to 1 during warmup.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def snake_case(optimizer, step_rules, last_epoch=-1):
    """Piecewise-constant schedule from a rule string like "1:10,20:0.1,0.01".

    Each "step:multiplier" pair applies its multiplier while the current step
    is below `step`; the trailing bare number is the multiplier thereafter.
    Fix: duplicate parameter names (SyntaxError) plus mangled locals — the
    rules dict was never actually populated and the inner lookup read the
    undefined name ``sorted_steps``.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def snake_case(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by linear decay to 0 at `num_training_steps`.

    Fix: duplicate parameter names (SyntaxError) and an inner lambda reading
    the undefined warmup/total-step names.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Linear decay; clamp at 0 once training steps are exhausted.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def snake_case(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    """Linear warmup followed by cosine decay (`num_cycles` half-waves).

    Fix: duplicate parameter names (SyntaxError) and mangled closure names.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def snake_case(optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1):
    """Linear warmup then cosine decay with `num_cycles` hard restarts.

    Fix: duplicate parameter names (SyntaxError) and mangled closure names.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # `% 1.0` restarts the cosine wave `num_cycles` times over training.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def snake_case(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """Linear warmup then polynomial decay from the optimizer's base lr to `lr_end`.

    Raises:
        ValueError: if `lr_end` is not strictly below the initial learning rate.
    Fix: duplicate parameter names (SyntaxError) and mangled closure locals.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Dispatch table from schedule enum member to its factory function.
# NOTE(review): the referenced getter names (get_linear_schedule_with_warmup,
# ...) and the `SchedulerType` enum do not exist under those names in this
# mangled file — the factories above all collapsed to `snake_case` and the
# enum to `lowercase_` — so this mapping is unresolvable as written.
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def snake_case(
    name,
    optimizer,
    step_rules=None,
    num_warmup_steps=None,
    num_training_steps=None,
    num_cycles=1,
    power=1.0,
    last_epoch=-1,
):
    """Unified scheduler factory: resolve `name` and forward only the arguments
    that schedule type needs, validating the required ones.

    Fix: all parameters shared one identifier (SyntaxError), the resolved enum
    and factory were assigned to mangled names but read as ``name`` /
    ``schedule_func``, and stray row-separator junk was fused onto the last
    line.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCAmelCase = logging.get_logger(__name__)


class lowerCamelCase(__snake_case):
    """Deprecated alias kept for backward compatibility; SageMaker support now
    lives in the base `Trainer`."""

    def __init__(self, args=None, **kwargs):
        # Fix: the warning category and the super() arguments were replaced by
        # the undefined name `lowercase_`; a deprecation warning takes a
        # warning class (FutureWarning here — TODO confirm against upstream)
        # and the ctor must forward `args`/`kwargs`, not a mangled name.
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 462 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger, then the map of pretrained checkpoints to their config URLs.
# NOTE(review): both constants were mangled to the same identifier, so the
# second assignment clobbers the logger as written.
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = {
    'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class lowercase_(__snake_case):
    """Configuration for RoCBert-style models (BERT plus pronunciation/shape
    embeddings).

    Fix: every __init__ parameter shared the identifier `lowercase_`
    (SyntaxError) and all attribute assignments were mangled; parameter names
    restored from the positional default values (which match the public
    RoCBert config signature) — TODO confirm against upstream.
    """

    _lowerCamelCase = 'roc_bert'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
"""simple docstring"""
# Capacity matrix of the sample flow network (row i, column j = capacity of
# edge i -> j); node 0 is the source, node 5 the sink.
a: list[list[int]] = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
def lowercase__(graph, s, t, parent):
    """Breadth-first search in the residual `graph` from `s`.

    Fills `parent` with BFS predecessors and returns True iff `t` is reachable
    through edges with positive residual capacity.
    Fix: all four parameters shared the name ``A`` (a SyntaxError) while the
    body read ``graph``/``s``/``t``/``parent``.
    """
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]
def lowercase__(graph, source, sink):
    """Return the saturated edges of a minimum source-sink cut (Edmonds-Karp).

    Mutates `graph` into its residual network while computing max flow, then
    reports every edge whose residual capacity reached 0 but whose original
    capacity was positive.
    Fix: all three parameters shared the name ``A`` (SyntaxError), locals were
    mangled, and the augmenting-path search called an undefined ``bfs`` — it
    is provided here as a private helper.
    """

    def _bfs(residual, s, t, parent):
        # Shortest augmenting-path search; records predecessors in `parent`.
        visited = [False] * len(residual)
        queue = [s]
        visited[s] = True
        while queue:
            u = queue.pop(0)
            for ind in range(len(residual[u])):
                if visited[ind] is False and residual[u][ind] > 0:
                    queue.append(ind)
                    visited[ind] = True
                    parent[ind] = u
        return visited[t]

    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while _bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
# Demo entry point.
# Fix: the original called the undefined names `mincut` / `test_graph`; in
# this file the function is `lowercase__` and the sample graph is `a`.
if __name__ == "__main__":
    print(lowercase__(a, source=0, sink=5))
| 218 | from cva import destroyAllWindows, imread, imshow, waitKey
def snake_case(img):
    """Invert an image in place (pixel -> 255 - pixel per channel) and return it.

    Fix: height/width were assigned to mangled names while the loops iterated
    ``range(__lowercase)`` — i.e. over the image object itself — instead of
    over the row/column counts.
    """
    height, width = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
# Script entry: load an image with OpenCV, invert it, and display the result.
# Fix: locals were mangled (`img` undefined where used), the converter was
# called by a name that does not exist in this file, and stray row-separator
# junk was fused onto the last line.
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = snake_case(img)

    # show result image
    imshow("negative of original image", neg)
    waitKey(0)
    destroyAllWindows()
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
# diffusers-style library constants (cache paths, weight filenames, endpoints).
# NOTE(review): every constant name was mangled to `lowerCamelCase_`, so as
# written each assignment clobbers the previous one and only '.self_attn'
# survives — the original distinct names (CONFIG_NAME, WEIGHTS_NAME, ...)
# need restoring before this module is usable.
lowerCamelCase_ = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ = 'config.json'
lowerCamelCase_ = 'diffusion_pytorch_model.bin'
lowerCamelCase_ = 'diffusion_flax_model.msgpack'
lowerCamelCase_ = 'model.onnx'
lowerCamelCase_ = 'diffusion_pytorch_model.safetensors'
lowerCamelCase_ = 'weights.pb'
lowerCamelCase_ = 'https://huggingface.co'
lowerCamelCase_ = default_cache_path
lowerCamelCase_ = 'diffusers_modules'
lowerCamelCase_ = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules'))
lowerCamelCase_ = ['fp16', 'non-ema']
lowerCamelCase_ = '.self_attn'
| 418 | import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
# Type aliases for protein feature dicts plus the picometer->angstrom factor.
# NOTE(review): all three were mangled to the same identifier; originally
# distinct names (FeatureDict, ModelOutput, PICO_TO_ANGSTROM presumably).
__SCREAMING_SNAKE_CASE : List[str] = Mapping[str, np.ndarray]
__SCREAMING_SNAKE_CASE : List[Any] = Mapping[str, Any] # Is a nested dict.
__SCREAMING_SNAKE_CASE : List[Any] = 0.01
# Immutable record of one protein structure (atom coordinates, residue types,
# masks, and optional metadata).
# NOTE(review): the field names were mangled — every field became
# `_lowerCamelCase`, so as a dataclass only the last declaration survives;
# the per-field comments below document the intended originals
# (atom_positions, aatype, atom_mask, residue_index, b_factors,
# chain_index, remark, parents, parents_chain_index).
@dataclasses.dataclass(frozen=__snake_case )
class lowercase_ :
    _lowerCamelCase = 42 # [num_res, num_atom_type, 3]
    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    _lowerCamelCase = 42 # [num_res]
    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    _lowerCamelCase = 42 # [num_res, num_atom_type]
    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    _lowerCamelCase = 42 # [num_res]
    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    _lowerCamelCase = 42 # [num_res, num_atom_type]
    # Chain indices for multi-chain predictions
    _lowerCamelCase = None
    # Optional remark about the protein. Included as a comment in output PDB
    # files
    _lowerCamelCase = None
    # Templates used to generate this protein (prediction-only)
    _lowerCamelCase = None
    # Chain corresponding to each parent
    _lowerCamelCase = None
def snake_case (__lowercase ) -> Protein:
    """Parse a ProteinNet-format string into a Protein record.

    Splits the text on `[SECTION]` tags, then reads the PRIMARY sequence,
    TERTIARY N/CA/C coordinates, and residue MASK.
    NOTE(review): locals here were machine-mangled (assigned to `_snake_case`
    but read under their original names: `tags`, `groups`, `seq`, `tertiary`,
    `atom_positions`, `mask`, `atom_mask`, `aatype` ...), so the function does
    not run as written; code left untouched.
    """
    _snake_case : str = r"(\[[A-Z]+\]\n)"
    _snake_case : List[str] = [tag.strip() for tag in re.split(__lowercase , __lowercase ) if len(__lowercase ) > 0]
    _snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
    # Only backbone atoms are present in ProteinNet data.
    _snake_case : List[str] = ["N", "CA", "C"]
    _snake_case : Any = None
    _snake_case : Union[str, Any] = None
    _snake_case : Optional[int] = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            _snake_case : Tuple = g[1][0].strip()
            for i in range(len(__lowercase ) ):
                if seq[i] not in residue_constants.restypes:
                    _snake_case : Tuple = "X" # FIXME: strings are immutable
            _snake_case : int = np.array(
                [residue_constants.restype_order.get(__lowercase , residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            _snake_case : List[List[float]] = []
            for axis in range(3 ):
                tertiary.append(list(map(__lowercase , g[1][axis].split() ) ) )
            _snake_case : Dict = np.array(__lowercase )
            _snake_case : Dict = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
            for i, atom in enumerate(__lowercase ):
                _snake_case : List[Any] = np.transpose(tertiary_np[:, i::3] )
            # ProteinNet coordinates are in picometers.
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            _snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
            _snake_case : Any = np.zeros(
                (
                    len(__lowercase ),
                    residue_constants.atom_type_num,
                ) ).astype(np.floataa )
            for i, atom in enumerate(__lowercase ):
                _snake_case : Dict = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=__lowercase , atom_mask=__lowercase , aatype=__lowercase , residue_index=np.arange(len(__lowercase ) ) , b_factors=__lowercase , )
def snake_case(prot, chain_id=0) -> list:
    """Build the PDB header lines (REMARK / PARENT) for one chain of ``prot``.

    Fixes over the mangled original: the signature declared the same parameter
    name twice (a SyntaxError) and zipped a list with itself; parents are now
    filtered by pairing ``parents_chain_index`` with ``parents``.

    Args:
        prot: object exposing ``remark``, ``parents`` and ``parents_chain_index``.
        chain_id: chain whose parents should be listed (default 0).

    Returns:
        List of header strings; always contains a PARENT line ("N/A" fallback).
    """
    pdb_headers = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        # Keep only the parents belonging to the requested chain.
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def snake_case(prot, pdb_str) -> str:
    """Add pdb headers to an existing PDB string, per chain.

    Fixes over the mangled original: the signature declared the same parameter
    name twice (a SyntaxError) and the body referenced names that were never
    bound (``lines``, ``parent_dict``, ``chain_counter``, ...).

    Args:
        prot: object exposing ``remark``, ``parents`` and ``parents_chain_index``.
        pdb_str: PDB text whose existing PARENT/REMARK lines are replaced.

    Returns:
        The PDB text with a REMARK (if any) and per-chain PARENT lines inserted.
    """
    out_pdb_lines = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    # Group parents by chain index; chains with no parent get ["N/A"].
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)
            max_idx = max(int(chain_idx) for chain_idx in parent_dict)
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p) -> str:
        # One PARENT record listing all parents of a chain.
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        # A TER not followed by END starts a new chain: emit its PARENT line.
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]
            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def snake_case (__lowercase ) -> str:
    """Render a ``Protein`` as PDB-format text (headers, ATOM records, TER/END).

    NOTE(review): identifiers look machine-mangled (repeated ``_snake_case``
    bindings, ``np.intaa`` / ``restype_atoa``, the inner helper's parameter is
    declared but the body reads ``r``).  The comments below describe the
    apparent intent; confirm against the original source before executing.
    """
    _snake_case : Optional[Any] = residue_constants.restypes + ["X"]

    def res_atoa(__lowercase ) -> str:
        # Map a residue-type index to its 3-letter code, "UNK" as fallback.
        # NOTE(review): parameter name is mangled; the body reads ``r``.
        return residue_constants.restype_atoa.get(restypes[r] , "UNK" )

    _snake_case : Optional[int] = residue_constants.atom_types
    _snake_case : List[str] = []
    _snake_case : Tuple = prot.atom_mask
    _snake_case : List[str] = prot.aatype
    _snake_case : int = prot.atom_positions
    _snake_case : int = prot.residue_index.astype(np.intaa )
    _snake_case : List[Any] = prot.b_factors
    _snake_case : str = prot.chain_index
    if np.any(aatype > residue_constants.restype_num ):
        raise ValueError("Invalid aatypes." )
    # File-level headers (REMARK / PARENT) come first.
    _snake_case : Union[str, Any] = get_pdb_headers(__lowercase )
    if len(__lowercase ) > 0:
        pdb_lines.extend(__lowercase )
    _snake_case : Optional[Any] = aatype.shape[0]
    _snake_case : str = 1
    _snake_case : Tuple = 0
    # One-letter chain tags: "A", "B", ...
    _snake_case : int = string.ascii_uppercase
    _snake_case : Optional[Any] = None
    # Add all atom sites.
    for i in range(__lowercase ):
        _snake_case : Dict = res_atoa(aatype[i] )
        for atom_name, pos, mask, b_factor in zip(__lowercase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
            if mask < 0.5:
                # Atom not present for this residue type / structure.
                continue
            _snake_case : List[Any] = "ATOM"
            _snake_case : Union[str, Any] = atom_name if len(__lowercase ) == 4 else F""" {atom_name}"""
            _snake_case : str = ""
            _snake_case : str = ""
            _snake_case : Any = 1.00
            _snake_case : str = atom_name[0]  # Protein supports only C, N, O, S, this works.
            _snake_case : Dict = ""
            _snake_case : Any = "A"
            if chain_index is not None:
                _snake_case : List[Any] = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            _snake_case : Optional[int] = (
                F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
                F"""{res_name_a:>3} {chain_tag:>1}"""
                F"""{residue_index[i]:>4}{insertion_code:>1}   """
                F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
                F"""{occupancy:>6.2f}{b_factor:>6.2f}          """
                F"""{element:>2}{charge:>2}"""
            )
            pdb_lines.append(__lowercase )
            atom_index += 1
        # Terminate after the last residue, or at a chain boundary.
        _snake_case : Dict = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                _snake_case : Optional[int] = True
                _snake_case : Union[str, Any] = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            _snake_case : List[str] = "TER"
            _snake_case : str = (
                F"""{chain_end:<6}{atom_index:>5}      {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
            )
            pdb_lines.append(__lowercase )
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(__lowercase , __lowercase ) )
    pdb_lines.append("END" )
    pdb_lines.append("" )
    return "\n".join(__lowercase )
def snake_case(prot) -> np.ndarray:
    """Return the ideal atom mask determined purely by residue type.

    Fix over the mangled original: the parameter had been renamed while the
    body still read ``prot``, which made it an unbound name.

    Args:
        prot: object exposing an ``aatype`` index array.

    Returns:
        Per-residue standard atom mask looked up from ``residue_constants``.
    """
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def snake_case(
    features,
    result,
    b_factors=None,
    chain_index=None,
    remark=None,
    parents=None,
    parents_chain_index=None,
) -> Protein:
    """Assemble a ``Protein`` from model features and a prediction result.

    Fix over the mangled original: all seven parameters shared one name
    (a SyntaxError); they are restored to distinct, meaningful names while
    keeping the same positional order and defaults.

    Args:
        features: mapping with "aatype" and "residue_index" arrays.
        result: mapping with "final_atom_positions" and "final_atom_mask".
        b_factors: optional per-atom B-factors; zeros when omitted.
        chain_index / remark / parents / parents_chain_index: optional metadata.
    """
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,  # PDB residue numbering is 1-based
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
# Re-derived names: the mangled original bound all three module-level values to
# ``__A`` (each assignment clobbering the previous one), while the functions
# below reference ``logger``, ``IMAGE_PROCESSOR_MAPPING_NAMES`` and
# ``IMAGE_PROCESSOR_MAPPING`` — the names those call sites expect are restored.
# A duplicated "mobilevit" entry was also removed.
logger = logging.get_logger(__name__)

# model_type -> image-processor class name.
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ("align", "EfficientNetImageProcessor"),
        ("beit", "BeitImageProcessor"),
        ("bit", "BitImageProcessor"),
        ("blip", "BlipImageProcessor"),
        ("blip-2", "BlipImageProcessor"),
        ("bridgetower", "BridgeTowerImageProcessor"),
        ("chinese_clip", "ChineseCLIPImageProcessor"),
        ("clip", "CLIPImageProcessor"),
        ("clipseg", "ViTImageProcessor"),
        ("conditional_detr", "ConditionalDetrImageProcessor"),
        ("convnext", "ConvNextImageProcessor"),
        ("convnextv2", "ConvNextImageProcessor"),
        ("cvt", "ConvNextImageProcessor"),
        ("data2vec-vision", "BeitImageProcessor"),
        ("deformable_detr", "DeformableDetrImageProcessor"),
        ("deit", "DeiTImageProcessor"),
        ("deta", "DetaImageProcessor"),
        ("detr", "DetrImageProcessor"),
        ("dinat", "ViTImageProcessor"),
        ("donut-swin", "DonutImageProcessor"),
        ("dpt", "DPTImageProcessor"),
        ("efficientformer", "EfficientFormerImageProcessor"),
        ("efficientnet", "EfficientNetImageProcessor"),
        ("flava", "FlavaImageProcessor"),
        ("focalnet", "BitImageProcessor"),
        ("git", "CLIPImageProcessor"),
        ("glpn", "GLPNImageProcessor"),
        ("groupvit", "CLIPImageProcessor"),
        ("imagegpt", "ImageGPTImageProcessor"),
        ("instructblip", "BlipImageProcessor"),
        ("layoutlmv2", "LayoutLMv2ImageProcessor"),
        ("layoutlmv3", "LayoutLMv3ImageProcessor"),
        ("levit", "LevitImageProcessor"),
        ("mask2former", "Mask2FormerImageProcessor"),
        ("maskformer", "MaskFormerImageProcessor"),
        ("mgp-str", "ViTImageProcessor"),
        ("mobilenet_v1", "MobileNetV1ImageProcessor"),
        ("mobilenet_v2", "MobileNetV2ImageProcessor"),
        ("mobilevit", "MobileViTImageProcessor"),
        ("mobilevitv2", "MobileViTImageProcessor"),
        ("nat", "ViTImageProcessor"),
        ("oneformer", "OneFormerImageProcessor"),
        ("owlvit", "OwlViTImageProcessor"),
        ("perceiver", "PerceiverImageProcessor"),
        ("pix2struct", "Pix2StructImageProcessor"),
        ("poolformer", "PoolFormerImageProcessor"),
        ("regnet", "ConvNextImageProcessor"),
        ("resnet", "ConvNextImageProcessor"),
        ("sam", "SamImageProcessor"),
        ("segformer", "SegformerImageProcessor"),
        ("swiftformer", "ViTImageProcessor"),
        ("swin", "ViTImageProcessor"),
        ("swin2sr", "Swin2SRImageProcessor"),
        ("swinv2", "ViTImageProcessor"),
        ("table-transformer", "DetrImageProcessor"),
        ("timesformer", "VideoMAEImageProcessor"),
        ("tvlt", "TvltImageProcessor"),
        ("upernet", "SegformerImageProcessor"),
        ("van", "ConvNextImageProcessor"),
        ("videomae", "VideoMAEImageProcessor"),
        ("vilt", "ViltImageProcessor"),
        ("vit", "ViTImageProcessor"),
        ("vit_hybrid", "ViTHybridImageProcessor"),
        ("vit_mae", "ViTImageProcessor"),
        ("vit_msn", "ViTImageProcessor"),
        ("xclip", "CLIPImageProcessor"),
        ("yolos", "YolosImageProcessor"),
    ]
)

# Lazily resolves config classes to their image-processor classes.
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def __a(class_name) -> Optional[Any]:
    """Resolve an image-processor class from its string name.

    Fix over the mangled original: the parameter had been renamed while the
    body still read ``class_name``, and the intermediate module name /
    module object bindings were lost.

    Lookup order: the name tables in ``IMAGE_PROCESSOR_MAPPING_NAMES``, then
    dynamically-registered extra content, then the top-level ``transformers``
    module (which yields a dummy class carrying a helpful error when a
    dependency is missing).  Returns ``None`` if nothing matches.
    """
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                # Name table may be ahead of the installed code; keep looking.
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def __a(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
) -> Any:
    """Load the image-processor configuration dict for a checkpoint.

    Fix over the mangled original: every parameter shared a single name
    (a SyntaxError); distinct names matching the keyword arguments forwarded
    to ``get_file_from_repo`` are restored.

    Returns an empty dict when no image-processor config file can be located
    (callers then fall back to the model config).
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            """Could not locate the image processor configuration file, will try to use the model config instead.""")
        return {}

    with open(resolved_config_file, encoding="""utf-8""") as reader:
        return json.load(reader)
class lowercase :
    """Factory that instantiates the appropriate image-processor class for a
    checkpoint.  Not meant to be constructed directly; use the classmethod
    below.

    NOTE(review): identifiers look machine-mangled — the method parameters are
    declared as ``__UpperCAmelCase`` while the bodies reference ``lowercase_``,
    ``kwargs``, ``config_dict`` etc., and both the ``from_pretrained``-style
    factory and the ``register`` helper share the name
    ``_SCREAMING_SNAKE_CASE`` (the second definition shadows the first).
    Confirm against the original source before executing.
    """

    def __init__( self : str ) -> Union[str, Any]:
        # Direct construction is forbidden; the class is a pure factory.
        raise EnvironmentError(
            """AutoImageProcessor is designed to be instantiated """
            """using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" )

    @classmethod
    @replace_list_option_in_docstrings(lowercase_ )
    def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : str ) -> Dict:
        # Resolve the image-processor class from (in order): the image-processor
        # config, a legacy feature-extractor config, the model config, and
        # finally the static IMAGE_PROCESSOR_MAPPING — honoring remote code.
        UpperCAmelCase_= kwargs.pop("""config""" , lowercase_ )
        UpperCAmelCase_= kwargs.pop("""trust_remote_code""" , lowercase_ )
        UpperCAmelCase_= True
        UpperCAmelCase_= ImageProcessingMixin.get_image_processor_dict(lowercase_ , **lowercase_ )
        UpperCAmelCase_= config_dict.get("""image_processor_type""" , lowercase_ )
        UpperCAmelCase_= None
        if "AutoImageProcessor" in config_dict.get("""auto_map""" , {} ):
            UpperCAmelCase_= config_dict["auto_map"]["AutoImageProcessor"]
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            UpperCAmelCase_= config_dict.pop("""feature_extractor_type""" , lowercase_ )
            if feature_extractor_class is not None:
                logger.warning(
                    """Could not find image processor class in the image processor config or the model config. Loading"""
                    """ based on pattern matching with the model's feature extractor configuration.""" )
                UpperCAmelCase_= feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""" )
            if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
                UpperCAmelCase_= config_dict["auto_map"]["AutoFeatureExtractor"]
                UpperCAmelCase_= feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""" )
                logger.warning(
                    """Could not find image processor auto map in the image processor config or the model config."""
                    """ Loading based on pattern matching with the model's feature extractor configuration.""" )
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(lowercase_ , lowercase_ ):
                UpperCAmelCase_= AutoConfig.from_pretrained(lowercase_ , **lowercase_ )
            # It could be in `config.image_processor_type``
            UpperCAmelCase_= getattr(lowercase_ , """image_processor_type""" , lowercase_ )
            if hasattr(lowercase_ , """auto_map""" ) and "AutoImageProcessor" in config.auto_map:
                UpperCAmelCase_= config.auto_map["AutoImageProcessor"]
        if image_processor_class is not None:
            UpperCAmelCase_= image_processor_class_from_name(lowercase_ )
        UpperCAmelCase_= image_processor_auto_map is not None
        UpperCAmelCase_= image_processor_class is not None or type(lowercase_ ) in IMAGE_PROCESSOR_MAPPING
        UpperCAmelCase_= resolve_trust_remote_code(
            lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        if has_remote_code and trust_remote_code:
            # Remote (Hub-hosted) image-processor code path.
            UpperCAmelCase_= get_class_from_dynamic_module(
                lowercase_ , lowercase_ , **lowercase_ )
            UpperCAmelCase_= kwargs.pop("""code_revision""" , lowercase_ )
            if os.path.isdir(lowercase_ ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(lowercase_ , **lowercase_ )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(lowercase_ , **lowercase_ )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(lowercase_ ) in IMAGE_PROCESSOR_MAPPING:
            UpperCAmelCase_= IMAGE_PROCESSOR_MAPPING[type(lowercase_ )]
            return image_processor_class.from_dict(lowercase_ , **lowercase_ )
        raise ValueError(
            F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
            F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
            F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )

    @staticmethod
    def _SCREAMING_SNAKE_CASE ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] ) -> Tuple:
        # Register a new (config class, image-processor class) pair.
        IMAGE_PROCESSOR_MAPPING.register(lowercase_ , lowercase_ )
| 593 | from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class lowercase_ ( __snake_case ):
    """Processor wrapping a SAM image processor: it preprocesses input images
    and rescales prompt points / labels / bounding boxes into the model's
    resized coordinate frame, returning everything in one encoding.

    NOTE(review): identifiers are machine-mangled — several ``def`` headers
    repeat the same parameter name ``lowercase_`` (a SyntaxError as written),
    all private helpers share the name ``UpperCamelCase`` (each definition
    shadows the previous), and the bodies reference names that were never
    bound.  Confirm against the original source before executing.
    """
    # Attributes the ProcessorMixin machinery expects.
    _lowerCamelCase = ['image_processor']
    _lowerCamelCase = 'SamImageProcessor'

    def __init__( self , lowercase_ ):
        super().__init__(lowercase_ )
        _snake_case : Optional[Any] = self.image_processor
        # Sentinel used to pad ragged point prompts.
        _snake_case : Tuple = -10
        _snake_case : str = self.image_processor.size["longest_edge"]

    def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = None , **lowercase_ , ):
        # Preprocess images, then validate and rescale point/box prompts into
        # the resized frame and attach them to the returned encoding.
        _snake_case : List[Any] = self.image_processor(
            lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
        # pop arguments that are not used in the foward but used nevertheless
        _snake_case : Any = encoding_image_processor["original_sizes"]
        if hasattr(lowercase_ , "numpy" ):  # Checks if Torch or TF tensor
            _snake_case : int = original_sizes.numpy()
        _snake_case ,_snake_case ,_snake_case : Union[str, Any] = self._check_and_preprocess_points(
            input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , )
        _snake_case : Dict = self._normalize_and_convert(
            lowercase_ , lowercase_ , input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , return_tensors=lowercase_ , )
        return encoding_image_processor

    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="pt" , ):
        # Rescale prompts to the resized image frame, pad ragged point sets,
        # and convert to the requested tensor type ("pt" or "tf").
        if input_points is not None:
            if len(lowercase_ ) != len(lowercase_ ):
                # A single original size is broadcast over all point sets.
                _snake_case : int = [
                    self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] ) for point in input_points
                ]
            else:
                _snake_case : Dict = [
                    self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ )
                    for point, original_size in zip(lowercase_ , lowercase_ )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    _snake_case ,_snake_case : int = self._pad_points_and_labels(lowercase_ , lowercase_ )
            _snake_case : Any = np.array(lowercase_ )
        if input_labels is not None:
            _snake_case : Optional[Any] = np.array(lowercase_ )
        if input_boxes is not None:
            if len(lowercase_ ) != len(lowercase_ ):
                _snake_case : Optional[Any] = [
                    self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] , is_bounding_box=lowercase_ )
                    for box in input_boxes
                ]
            else:
                _snake_case : List[str] = [
                    self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ , is_bounding_box=lowercase_ )
                    for box, original_size in zip(lowercase_ , lowercase_ )
                ]
            _snake_case : Tuple = np.array(lowercase_ )
        if input_boxes is not None:
            if return_tensors == "pt":
                _snake_case : List[str] = torch.from_numpy(lowercase_ )
                # boxes batch size of 1 by default
                _snake_case : Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                _snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
                # boxes batch size of 1 by default
                _snake_case : Optional[int] = tf.expand_dims(lowercase_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                _snake_case : Tuple = torch.from_numpy(lowercase_ )
                # point batch size of 1 by default
                _snake_case : int = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                _snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
                # point batch size of 1 by default
                _snake_case : Tuple = tf.expand_dims(lowercase_ , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                _snake_case : Dict = torch.from_numpy(lowercase_ )
                # point batch size of 1 by default
                _snake_case : str = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                _snake_case : Optional[Any] = tf.convert_to_tensor(lowercase_ )
                # point batch size of 1 by default
                _snake_case : List[Any] = tf.expand_dims(lowercase_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels} )
        return encoding_image_processor

    def UpperCamelCase ( self , lowercase_ , lowercase_ ):
        # Pad every point set (and its labels) up to the largest set's length
        # using the sentinel pad value so they stack into one array.
        _snake_case : List[Any] = max([point.shape[0] for point in input_points] )
        _snake_case : List[str] = []
        for i, point in enumerate(lowercase_ ):
            if point.shape[0] != expected_nb_points:
                _snake_case : Optional[Any] = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                _snake_case : Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(lowercase_ )
        _snake_case : Optional[Any] = processed_input_points
        return input_points, input_labels

    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False ):
        # Scale (x, y) coordinates from the original image size to the
        # preprocessed size; boxes are treated as two corner points.
        _snake_case ,_snake_case : Optional[int] = original_size
        _snake_case ,_snake_case : List[str] = self.image_processor._get_preprocess_shape(lowercase_ , longest_edge=lowercase_ )
        _snake_case : Optional[Any] = deepcopy(lowercase_ ).astype(lowercase_ )
        if is_bounding_box:
            _snake_case : str = coords.reshape(-1 , 2 , 2 )
        _snake_case : Optional[Any] = coords[..., 0] * (new_w / old_w)
        _snake_case : Dict = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            _snake_case : Optional[Any] = coords.reshape(-1 , 4 )
        return coords

    def UpperCamelCase ( self , lowercase_=None , lowercase_=None , lowercase_=None , ):
        # Validate raw prompt inputs (lists or tensors) and convert them to
        # lists of np.ndarray; raises ValueError on malformed nesting.
        if input_points is not None:
            if hasattr(lowercase_ , "numpy" ):  # Checks for TF or Torch tensor
                _snake_case : Union[str, Any] = input_points.numpy().tolist()
            if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_points[0] , lowercase_ ):
                raise ValueError("Input points must be a list of list of floating points." )
            _snake_case : Any = [np.array(lowercase_ ) for input_point in input_points]
        else:
            _snake_case : Optional[int] = None
        if input_labels is not None:
            if hasattr(lowercase_ , "numpy" ):
                _snake_case : Tuple = input_labels.numpy().tolist()
            if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_labels[0] , lowercase_ ):
                raise ValueError("Input labels must be a list of list integers." )
            _snake_case : Tuple = [np.array(lowercase_ ) for label in input_labels]
        else:
            _snake_case : Optional[Any] = None
        if input_boxes is not None:
            if hasattr(lowercase_ , "numpy" ):
                _snake_case : List[str] = input_boxes.numpy().tolist()
            if (
                not isinstance(lowercase_ , lowercase_ )
                or not isinstance(input_boxes[0] , lowercase_ )
                or not isinstance(input_boxes[0][0] , lowercase_ )
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points." )
            _snake_case : List[Any] = [np.array(lowercase_ ).astype(np.floataa ) for box in input_boxes]
        else:
            _snake_case : Optional[int] = None
        return input_points, input_labels, input_boxes

    @property
    def UpperCamelCase ( self ):
        # Combined, de-duplicated model input names of the wrapped processor.
        _snake_case : Union[str, Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(lowercase_ ) )

    def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
        # Delegate mask post-processing to the wrapped image processor.
        return self.image_processor.post_process_masks(*lowercase_ , **lowercase_ )
# NOTE(review): every constant below is bound to the same mangled name ``A_``,
# so each assignment clobbers the previous one — upstream these were almost
# certainly distinct per-pipeline parameter-set names; confirm and restore.
# Group labels below are inferred from the key contents and should be verified.

# Text-to-image call / batch parameter sets (presumably).
A_ = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
A_ = frozenset(["prompt", "negative_prompt"])
A_ = frozenset([])
# Image-variation parameter sets (presumably).
A_ = frozenset(["image"])
A_ = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)
A_ = frozenset(["image"])
# Text-guided image-to-image parameter sets (presumably).
A_ = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
A_ = frozenset(["prompt", "image", "negative_prompt"])
A_ = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)
A_ = frozenset(["prompt", "image", "mask_image", "negative_prompt"])
A_ = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
A_ = frozenset(["image", "mask_image"])
# Example-guided inpainting parameter sets (presumably).
A_ = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)
A_ = frozenset(["example_image", "image", "mask_image"])
# Class-conditional / unconditional generation parameter sets (presumably).
A_ = frozenset(["class_labels"])
A_ = frozenset(["class_labels"])
A_ = frozenset(["batch_size"])
A_ = frozenset([])
A_ = frozenset(["batch_size"])
A_ = frozenset([])
# Text-to-audio parameter sets (presumably).
A_ = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)
A_ = frozenset(["prompt", "negative_prompt"])
# Token-based generation parameter sets (presumably).
A_ = frozenset(["input_tokens"])
A_ = frozenset(["input_tokens"])
def snake_case(grid) -> int:
    """Return the minimum path sum from top-left to bottom-right of ``grid``,
    moving only right or down.  The grid is updated in place as a DP table.

    Fixes over the mangled original: the parameter had been renamed while the
    body read ``grid``, and the helper below was renamed away from the
    ``fill_row`` name this function actually calls.

    Raises:
        TypeError: if the grid is empty or its first row is empty.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    # Prefix-sum the first row: only rightward moves are possible there.
    top_row = grid[0]
    for col in range(1, len(top_row)):
        top_row[col] += top_row[col - 1]

    above = top_row
    for current in grid[1:]:
        above = fill_row(current, above)

    return grid[-1][-1]


def fill_row(current_row, row_above) -> list:
    """Accumulate one DP row in place: each cell gains the cheaper of its left
    neighbour or the cell above.  Returns the (mutated) row.
    """
    current_row[0] += row_above[0]
    for col in range(1, len(current_row)):
        current_row[col] += min(current_row[col - 1], row_above[col])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import math
import random
def sigmoid_function(value, deriv=False) -> float:
    """Logistic sigmoid 1 / (1 + e^-x).

    With ``deriv=True``, ``value`` is assumed to already be a sigmoid output
    ``s`` and the derivative ``s * (1 - s)`` is returned.

    Name restored from the mangled original: the trainer below calls
    ``sigmoid_function``, and the original signature repeated one parameter
    name twice (a SyntaxError).
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial input fed into the single-weight network below (name restored: the
# trainer references INITIAL_VALUE).
INITIAL_VALUE = 0.02


def forward_propagation(expected, number_propagations) -> float:
    """Train a single weight by gradient descent so that
    sigmoid(INITIAL_VALUE * weight) approaches ``expected / 100``.

    Args:
        expected: target value on a 0-100 scale.
        number_propagations: number of forward/backward passes (must be >= 1).

    Returns:
        The final network output, rescaled back to the 0-100 range.
    """
    # Random initial weight in [-99, 199] (odd range kept from the original).
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, deriv=True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
| 126 | import random
def _partition(data, pivot) -> tuple:
    """Three-way partition of ``data`` around ``pivot``.

    Returns ``(less, equal, greater)`` lists.  Name restored from the mangled
    original: the selector below calls ``_partition``, and the original
    signature repeated one parameter name (a SyntaxError).
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items, index):
    """Return the ``index``-th smallest element of ``items`` (0-based) using
    randomized quickselect, or ``None`` when ``index`` is out of range.

    Name restored from the mangled original, whose body recursed via
    ``quick_select`` while the definition carried a different name.
    """
    if index >= len(items) or index < 0:
        return None

    # Random pivot keeps the expected running time linear.
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __magic_name__ ( unittest.TestCase ):
    """Unit tests for ``GradientAccumulator`` (single-device accumulation and
    accumulation under a two-replica MirroredStrategy).

    NOTE(review): identifiers are machine-mangled — each ``def`` header repeats
    the parameter name ``snake_case_`` (a SyntaxError as written), bodies
    reference ``lowercase_`` and other unbound names, and all three tests share
    the method name ``lowerCAmelCase`` (later definitions shadow earlier ones).
    Confirm against the original source before executing.
    """

    def lowerCAmelCase ( self : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int] ):
        # Element-wise approximate list comparison helper.
        self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
        for a, b in zip(lowercase_ , lowercase_ ):
            self.assertAlmostEqual(lowercase_ , lowercase_ , delta=lowercase_ )

    def lowerCAmelCase ( self : Optional[int] ):
        # Accumulates three gradient steps, checks the running sum, the
        # mismatched-length error, and reset() behaviour.
        __snake_case = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0] )] )
        accumulator([tf.constant([-2.0, 1.0] )] )
        accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(lowercase_ ):
            accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
        self.assertEqual(accumulator.step , 3 )
        self.assertEqual(len(accumulator.gradients ) , 1 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
        accumulator.reset()
        self.assertEqual(accumulator.step , 0 )
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )

    def lowerCAmelCase ( self : str ):
        # Distributed accumulation across two logical CPU devices.
        __snake_case = None
        ops.enable_eager_execution_internal()
        __snake_case = tf.config.list_physical_devices("CPU" )
        if len(lowercase_ ) == 1:
            # Split the single physical CPU into two logical devices.
            tf.config.set_logical_device_configuration(
                physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        __snake_case = tf.config.list_logical_devices(device_type="CPU" )
        __snake_case = tf.distribute.MirroredStrategy(devices=devices[:2] )
        with strategy.scope():
            __snake_case = GradientAccumulator()
            __snake_case = tf.Variable([4.0, 3.0] )
            __snake_case = create_optimizer(5e-5 , 10 , 5 )
            __snake_case = tf.Variable([0.0, 0.0] , trainable=lowercase_ )

        def accumulate_on_replica(snake_case_ : int ):
            accumulator([gradient] )

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )

        @tf.function
        def accumulate(snake_case_ : Any , snake_case_ : Any ):
            with strategy.scope():
                __snake_case = strategy.experimental_local_results(lowercase_ )
                local_variables[0].assign(lowercase_ )
                local_variables[1].assign(lowercase_ )
                strategy.run(lowercase_ , args=(gradient_placeholder,) )

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(lowercase_ )

        def _check_local_values(snake_case_ : List[str] , snake_case_ : Dict ):
            # Asserts the per-replica accumulated gradients.
            __snake_case = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , lowercase_ , tol=1e-2 )
            self.assertListAlmostEqual(values[1].value() , lowercase_ , tol=1e-2 )

        accumulate([1.0, 2.0] , [-1.0, 1.0] )
        accumulate([3.0, -1.0] , [-1.0, -1.0] )
        accumulate([-2.0, 2.0] , [3.0, -2.0] )
        self.assertEqual(accumulator.step , 3 )
        _check_local_values([2.0, 3.0] , [1.0, -2.0] )
        apply_grad()
        self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
        accumulator.reset()
        self.assertEqual(accumulator.step , 0 )
        _check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 163 | from math import pow, sqrt
def validate(*values) -> bool:
    """Return True iff at least one value is given and all are strictly > 0.

    Name restored from the mangled original: every function below calls
    ``validate``.
    """
    return len(values) > 0 and all(value > 0.0 for value in values)


def effusion_ratio(molar_mass_1, molar_mass_2):
    """Graham's law: rate1/rate2 = sqrt(M2 / M1), rounded to 6 places.

    On invalid input a ValueError *instance* is returned (not raised) — a
    quirk kept from the original implementation.
    """
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must greater than 0.")
    )


def first_effusion_rate(effusion_rate, molar_mass_1, molar_mass_2):
    """Rate of gas 1 given the rate of gas 2 and both molar masses."""
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_effusion_rate(effusion_rate, molar_mass_1, molar_mass_2):
    """Rate of gas 2 given the rate of gas 1 and both molar masses."""
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def first_molar_mass(molar_mass, effusion_rate_1, effusion_rate_2):
    """Molar mass of gas 1 given gas 2's molar mass and both rates."""
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_molar_mass(molar_mass, effusion_rate_1, effusion_rate_2):
    """Molar mass of gas 2 given gas 1's molar mass and both rates."""
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
return 1_0 - x * x
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
if equation(__lowercase ) * equation(__lowercase ) >= 0:
raise ValueError("""Wrong space!""" )
A_ : List[Any] = a
while (b - a) >= 0.01:
# Find middle point
A_ : str = (a + b) / 2
# Check if middle point is root
if equation(__lowercase ) == 0.0:
break
# Decide the side to repeat the steps
if equation(__lowercase ) * equation(__lowercase ) < 0:
A_ : Tuple = c
else:
A_ : Tuple = c
return c
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Demo: bisection roots of f(x) = 10 - x**2 on two bracketing intervals.
    # NOTE(review): `bisection` is not defined in this obfuscated module (the
    # functions above are both named `_lowerCAmelCase`) — confirm the intended
    # name before running as a script.
    print(bisection(-2, 5))
    print(bisection(0, 6))
| 569 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
    """Deprecated shim for the old LayoutLMv2 feature extractor.

    Warns on construction and otherwise defers entirely to the parent
    image-processor class it inherits from.
    """

    def __init__( self , *lowercase_ , **lowercase_ ):
        # Emit the deprecation notice, then forward all arguments unchanged.
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead." , lowercase_ , )
        super().__init__(*lowercase_ , **lowercase_ )
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps each submodule to the public names it exports; consumed by _LazyModule.
# (The original bound this dict and the torch-only list to throwaway names and
# then referenced an undefined `_import_structure` — fixed here.)
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is optional: without it the modeling objects are simply not exported.
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    # ...while at runtime the module is replaced by a lazy loader.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TypedDict
class lowercase_(TypedDict):
    """Typed result of the Burrows-Wheeler transform.

    The original declared an undefined base class (`__snake_case`) and two
    placeholder fields; restored as the TypedDict the module's annotations use.
    """

    # Last column of the sorted rotation matrix.
    bwt_string: str
    # Row index of the original string within the sorted rotations.
    idx_original_string: int
def snake_case(s: str) -> list[str]:
    """Return every cyclic rotation of ``s`` (identity rotation first).

    Raises TypeError when ``s`` is not a string. (The original checked
    ``isinstance(s, s)`` — the value against itself — instead of ``str``.)
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]
def snake_case(s: str) -> dict:
    """Compute the Burrows-Wheeler transform of ``s``.

    Returns a dict with:
      - "bwt_string": last character of each sorted rotation, joined;
      - "idx_original_string": index of ``s`` among the sorted rotations.
    Raises TypeError for non-string input and ValueError for an empty string.
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    # Rotation list built inline: the sibling `all_rotations` helper name is
    # unresolved in this obfuscated module.
    rotations = [s[i:] + s[:i] for i in range(len(s))]
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def snake_case(bwt_string: str, idx_original_string: int) -> str:
    """Invert the Burrows-Wheeler transform.

    Repeatedly prepends the BWT column to the rotation table and re-sorts;
    after len(bwt_string) passes the table holds all sorted rotations, and the
    row at ``idx_original_string`` is the original string.

    Raises TypeError for a non-string ``bwt_string`` or a non-int-castable
    index, ValueError for an empty string or an out-of-range index.
    """
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string).")
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        # Prepend the BWT column, then sort to recover one more character per row.
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    # Interactive demo: transform a user-provided string and invert it back.
    # (The original bound every local to one reused obfuscated name, referenced
    # undefined `entry_msg`/`s`, and carried trailing garbage tokens.)
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
_UpperCAmelCase : List[str] = 3
def snake_case__(p: int) -> int:
    """Return a (probabilistically found) primitive root modulo prime ``p``.

    Candidates g are rejected when g^2 = 1 (mod p) or g^p = 1 (mod p).
    The original's body read `__lowercase` while the parameter had a
    different name, so every reference was unresolved — fixed here.
    """
    print('''Generating primitive root of p''' )
    while True:
        g = random.randrange(3, p)
        if pow(g, 2, p) == 1:
            continue
        if pow(g, p, p) == 1:
            continue
        return g
def snake_case__(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal key pair of the given bit size.

    Returns ``(public_key, private_key)`` where public = (key_size, g, g^d, p)
    and private = (key_size, d). The original's body read `__lowercase`
    while the parameter had a different name — fixed here.
    """
    print('''Generating prime p...''' )
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def snake_case__(name: str, key_size: int) -> None:
    """Generate a key pair and write ``<name>_pubkey.txt`` / ``<name>_privkey.txt``.

    Aborts via sys.exit() when either output file already exists, to avoid
    silently overwriting keys. The original's body read `__lowercase` while
    the parameters had different names — fixed here.
    """
    if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
        print('''\nWARNING:''' )
        print(
            f'''\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n'''
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
    with open(f'''{name}_pubkey.txt''' ,'''w''' ) as fo:
        fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
    print(f'''Writing private key to file {name}_privkey.txt...''' )
    with open(f'''{name}_privkey.txt''' ,'''w''' ) as fo:
        fo.write(f'''{private_key[0]},{private_key[1]}''' )
def snake_case__ ( ) -> None:
    """CLI entry point: generate a 2048-bit ElGamal key pair named 'elgamal'."""
    print('''Making key files...''' )
    # NOTE(review): `make_key_files` is unresolved in this obfuscated module
    # (all functions share the name `snake_case__`) — confirm before running.
    make_key_files('''elgamal''' ,20_48 )
    print('''Key files generation successful''' )
if __name__ == "__main__":
    # NOTE(review): `main` is unresolved here — the entry function above is
    # named `snake_case__` by obfuscation; confirm the intended name.
    main()
| 683 | # NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation notice at import time: this module only re-exports the
# controlnet pipeline classes from their new locations (see imports above).
deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
"""simple docstring"""
import re
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )]
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = split_input(str_ )
return "".join(
["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
try:
__lowerCAmelCase = split_input(__lowercase )
if upper:
__lowerCAmelCase = "".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
__lowerCAmelCase = "".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
return to_simple_case(__lowercase )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
try:
__lowerCAmelCase = to_simple_case(__lowercase )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
return to_complex_case(__lowercase , __lowercase , "_" )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
return to_complex_case(__lowercase , __lowercase , "-" )
if __name__ == "__main__":
__import__("doctest").testmod()
| 636 | from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowercase_ :
    """Builds tiny LED configs/inputs for the TF-LED tests in this module.

    NOTE(review): obfuscation artefacts — the three class attributes below all
    share one name (only the last assignment survives at runtime), and
    ``__init__``'s parameters are all named ``lowercase_`` while the body reads
    ``parent``/``batch_size``/... which are unresolved here. Restore the
    original names before executing.
    """

    _lowerCamelCase = LEDConfig
    _lowerCamelCase = {}
    _lowerCamelCase = 'gelu'

    def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=20 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=4 , ):
        # Cache the tiny-model hyper-parameters used when building test configs.
        _snake_case : Optional[int] = parent
        _snake_case : str = batch_size
        _snake_case : int = seq_length
        _snake_case : Dict = is_training
        _snake_case : Optional[Any] = use_labels
        _snake_case : Tuple = vocab_size
        _snake_case : str = hidden_size
        _snake_case : int = num_hidden_layers
        _snake_case : Union[str, Any] = num_attention_heads
        _snake_case : int = intermediate_size
        _snake_case : List[str] = hidden_dropout_prob
        _snake_case : List[Any] = attention_probs_dropout_prob
        _snake_case : int = max_position_embeddings
        _snake_case : Union[str, Any] = eos_token_id
        _snake_case : str = pad_token_id
        _snake_case : Any = bos_token_id
        _snake_case : str = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        _snake_case : List[Any] = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        _snake_case : List[str] = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def UpperCamelCase ( self ):
        """Create a tiny LEDConfig plus matching inputs (ids, masks, global mask)."""
        _snake_case : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        # Force every sequence to end with the EOS token.
        _snake_case : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        _snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
        _snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _snake_case : List[str] = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        _snake_case : Optional[Any] = prepare_led_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
        # Mark only the final position of every sequence as globally attended.
        _snake_case : int = tf.concat(
            [tf.zeros_like(lowercase_ )[:, :-1], tf.ones_like(lowercase_ )[:, -1:]] , axis=-1 , )
        _snake_case : List[Any] = global_attention_mask
        return config, inputs_dict

    def UpperCamelCase ( self , lowercase_ , lowercase_ ):
        """Verify cached past_key_values reproduce the no-cache decoder logits."""
        _snake_case : Dict = TFLEDModel(config=lowercase_ ).get_decoder()
        _snake_case : Optional[Any] = inputs_dict["input_ids"]
        _snake_case : Optional[int] = input_ids[:1, :]
        _snake_case : int = inputs_dict["attention_mask"][:1, :]
        _snake_case : int = 1
        # first forward pass
        _snake_case : str = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
        _snake_case ,_snake_case : Optional[int] = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        _snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _snake_case : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        _snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
        _snake_case : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        _snake_case : str = model(lowercase_ , attention_mask=lowercase_ )[0]
        _snake_case : List[str] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        _snake_case : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        _snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx]
        _snake_case : List[str] = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
def snake_case (__lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , ) -> List[Any]:
    """Assemble LED model kwargs, deriving any mask that was not supplied.

    NOTE(review): obfuscation artefact — every parameter is named
    ``__lowercase`` (duplicate names are a SyntaxError) while the body reads
    ``config``/``input_ids``/``decoder_input_ids``/...; restore those names.
    """
    if attention_mask is None:
        # Attend to every non-pad encoder token.
        _snake_case : int = tf.cast(tf.math.not_equal(__lowercase , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        # Always attend to the first decoder position, then mask decoder pads.
        _snake_case : Optional[int] = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        # Default: keep all encoder attention heads.
        _snake_case : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        # Default: keep all decoder attention heads.
        _snake_case : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class lowercase_ ( __snake_case , __snake_case , unittest.TestCase ):
    """Common-suite tests for TF-LED models (config, cache, attention shapes).

    NOTE(review): obfuscation artefacts — the class attributes below all share
    one name (only the last assignment survives), and several locals are bound
    to one reused name while later lines read the original identifiers
    (``inputs_dict``, ``config``, ``model``, ``outputs``...), which are
    unresolved here.
    """

    _lowerCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    _lowerCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    _lowerCamelCase = (
        {
            'conversational': TFLEDForConditionalGeneration,
            'feature-extraction': TFLEDModel,
            'summarization': TFLEDForConditionalGeneration,
            'text2text-generation': TFLEDForConditionalGeneration,
            'translation': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _lowerCamelCase = True
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False

    def UpperCamelCase ( self ):
        """Set up the tiny-model tester and the config tester."""
        _snake_case : Optional[Any] = TFLEDModelTester(self )
        _snake_case : List[Any] = ConfigTester(self , config_class=lowercase_ )

    def UpperCamelCase ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def UpperCamelCase ( self ):
        """Exercise the cached-decoding equivalence check from the tester."""
        _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )

    def UpperCamelCase ( self ):
        """Check attention-output shapes for local and global attention."""
        _snake_case ,_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        _snake_case : Tuple = tf.zeros_like(inputs_dict["attention_mask"] )
        _snake_case : Tuple = 2
        # Globally attend the first `num_global_attn_indices` positions.
        _snake_case : Dict = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
        _snake_case : Tuple = True
        _snake_case : Union[str, Any] = self.model_tester.seq_length
        _snake_case : Union[str, Any] = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(lowercase_ ):
            # Decoder attention: one tensor per layer, full seq x seq shape.
            _snake_case : Optional[Any] = outputs.decoder_attentions
            self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )

        def check_encoder_attentions_output(lowercase_ ):
            # Encoder yields both local and global attention tensors per layer.
            _snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
            _snake_case : Optional[int] = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )

        for model_class in self.all_model_classes:
            _snake_case : Union[str, Any] = True
            _snake_case : Dict = False
            _snake_case : Any = False
            _snake_case : Any = model_class(lowercase_ )
            _snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
            _snake_case : Tuple = len(lowercase_ )
            self.assertEqual(config.output_hidden_states , lowercase_ )
            check_encoder_attentions_output(lowercase_ )
            if self.is_encoder_decoder:
                _snake_case : int = model_class(lowercase_ )
                _snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
                self.assertEqual(config.output_hidden_states , lowercase_ )
                check_decoder_attentions_output(lowercase_ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            _snake_case : List[Any] = True
            _snake_case : Any = model_class(lowercase_ )
            _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
            self.assertEqual(config.output_hidden_states , lowercase_ )
            check_encoder_attentions_output(lowercase_ )
            # Check attention is always last and order is fine
            _snake_case : Optional[int] = True
            _snake_case : Optional[int] = True
            _snake_case : List[Any] = model_class(lowercase_ )
            _snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase_ ) )
            self.assertEqual(model.config.output_hidden_states , lowercase_ )
            check_encoder_attentions_output(lowercase_ )

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
    def UpperCamelCase ( self ):
        """Skipped: tracing/compile test is incompatible with LED's conditionals."""
        pass

    def UpperCamelCase ( self ):
        # TODO: Head-masking not yet implement
        pass
def snake_case (__lowercase ) -> Optional[Any]:
    """Wrap a nested list of token ids in a constant integer tensor.

    NOTE(review): ``tf.intaa`` looks like an obfuscated dtype name (digits were
    mangled; likely ``tf.int32`` or ``tf.int64``) — confirm against the
    original test file before running.
    """
    return tf.constant(__lowercase , dtype=tf.intaa )


# Numeric tolerance used by the integration checks below.
__SCREAMING_SNAKE_CASE : List[Any] = 1E-4
@slow
@require_tf
class lowercase_ ( unittest.TestCase ):
    """Slow integration checks against the pretrained allenai/led-base-16384."""

    def UpperCamelCase ( self ):
        """Base model forward: check hidden-state shape and a 3x3 logit slice."""
        _snake_case : Dict = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
        # change to intended input here
        _snake_case : Union[str, Any] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        _snake_case : Optional[int] = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        _snake_case : Union[str, Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
        _snake_case : Optional[Any] = model(**lowercase_ )[0]
        _snake_case : str = (1, 1_024, 768)
        self.assertEqual(output.shape , lowercase_ )
        # change to expected output here
        _snake_case : Optional[Any] = tf.convert_to_tensor(
            [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
        tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 )

    def UpperCamelCase ( self ):
        """LM-head forward: check vocab-sized logits and a 3x3 slice."""
        _snake_case : List[Any] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
        # change to intended input here
        _snake_case : int = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        _snake_case : int = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        _snake_case : Optional[Any] = prepare_led_inputs_dict(model.config , lowercase_ , lowercase_ )
        _snake_case : Tuple = model(**lowercase_ )[0]
        _snake_case : Any = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape , lowercase_ )
        # change to expected output here
        _snake_case : Optional[int] = tf.convert_to_tensor(
            [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
        tf.debugging.assert_near(output[:, :3, :3] , lowercase_ , atol=1e-3 , rtol=1e-3 )
from graphs.minimum_spanning_tree_kruskal import kruskal
def __SCREAMING_SNAKE_CASE() -> None:
    """Smoke test: Kruskal's MST on a fixed 9-node graph matches the known MST.

    Fixes from the original: return annotation said ``int`` but nothing is
    returned; ``kruskal`` was called with an undefined name; and the final
    assert compared ``sorted(x) == sorted(x)`` (the same value with itself),
    so it could never fail.
    """
    num_nodes = 9
    # Edge list as [u, v, weight].
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # Order-insensitive comparison of the selected MST edges.
    assert sorted(result) == sorted(expected)
| 462 | import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the tiny SentencePiece fixture model used by these tokenizer tests.
__SCREAMING_SNAKE_CASE : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')


@require_sentencepiece
@require_tokenizers
class lowercase_ ( __snake_case , unittest.TestCase ):
    """Test suite for the Reformer tokenizer (slow + fast variants).

    NOTE(review): obfuscation artefacts throughout — class attributes share one
    name (only the last survives), and many locals are bound to one reused
    name while later lines read the original identifiers (``tokenizer``,
    ``sequence``...), which are unresolved here. Restore names before running.
    """

    _lowerCamelCase = ReformerTokenizer
    _lowerCamelCase = ReformerTokenizerFast
    _lowerCamelCase = True
    _lowerCamelCase = False
    _lowerCamelCase = True

    def UpperCamelCase ( self ):
        """Build a tokenizer from the fixture model and save it to a temp dir."""
        super().setUp()
        _snake_case : Union[str, Any] = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
        tokenizer.save_pretrained(self.tmpdirname )

    def UpperCamelCase ( self ):
        """Round-trip a single token through id lookup in both directions."""
        _snake_case : int = "<s>"
        _snake_case : int = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )

    def UpperCamelCase ( self ):
        """Spot-check first/last vocab entries and the vocab length."""
        _snake_case : str = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(lowercase_ ) , 1_000 )

    def UpperCamelCase ( self ):
        """The fixture vocab holds exactly 1000 pieces."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )

    def UpperCamelCase ( self ):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return
        _snake_case : Tuple = self.get_tokenizer()
        _snake_case : List[str] = self.get_rust_tokenizer()
        _snake_case : int = "I was born in 92000, and this is falsé."
        _snake_case : Tuple = tokenizer.tokenize(lowercase_ )
        _snake_case : List[Any] = rust_tokenizer.tokenize(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
        _snake_case : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
        _snake_case : Tuple = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
        _snake_case : Dict = self.get_rust_tokenizer()
        _snake_case : List[Any] = tokenizer.encode(lowercase_ )
        _snake_case : str = rust_tokenizer.encode(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )

    def UpperCamelCase ( self , lowercase_=15 ):
        """Padding must be rejected when the tokenizer defines no pad token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                _snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                # Simple input
                _snake_case : List[str] = "This is a simple input"
                _snake_case : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
                _snake_case : Union[str, Any] = ("This is a simple input", "This is a pair")
                _snake_case : int = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
                # Simple input
                self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
                # Simple input
                self.assertRaises(
                    lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
                # Pair input
                self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
                # Pair input
                self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
                # Pair input
                self.assertRaises(
                    lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )

    def UpperCamelCase ( self ):
        # Intentionally empty override of a base-class test.
        pass

    def UpperCamelCase ( self ):
        """Full tokenization checks: pieces, ids, and id->token round trip."""
        _snake_case : Dict = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
        _snake_case : Tuple = tokenizer.tokenize("This is a test" )
        self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] , )
        _snake_case : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            lowercase_ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        _snake_case : Any = tokenizer.convert_tokens_to_ids(lowercase_ )
        self.assertListEqual(
            lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        _snake_case : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_ )
        self.assertListEqual(
            lowercase_ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )

    @cached_property
    def UpperCamelCase ( self ):
        """Lazily load the full pretrained crime-and-punishment tokenizer."""
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )

    @slow
    def UpperCamelCase ( self ):
        """Known-good encoding for a short string."""
        _snake_case : int = "Hello World!"
        _snake_case : Dict = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )

    @slow
    def UpperCamelCase ( self ):
        """Known-good encoding for a long string with rare/unknown pieces."""
        _snake_case : Optional[int] = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # Expected token ids (list compacted onto fewer lines; values unchanged).
        _snake_case : Dict = [
            108, 265, 24, 111, 4, 258, 156, 35, 28, 275,
            3, 259, 297, 260, 84, 4, 35, 110, 44, 8,
            259, 91, 268, 21, 11, 209, 274, 109, 266, 277,
            117, 86, 93, 315, 258, 278, 258, 277, 258, 0,
            258, 288, 258, 319, 258, 0, 258, 0, 258, 0,
            258, 0, 258, 287, 258, 315, 258, 289, 258, 278,
            99, 269, 266, 262, 8, 259, 241, 4, 217, 230,
            268, 266, 55, 168, 106, 75, 193, 266, 223, 27,
            49, 26, 282, 25, 264, 299, 19, 26, 0, 258,
            277, 117, 86, 93, 176, 183, 270, 11, 262, 42,
            61, 265,
        ]
        self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )

    @require_torch
    @slow
    def UpperCamelCase ( self ):
        """Feed tokenizer output through a small ReformerModel forward pass."""
        import torch
        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        _snake_case : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
        _snake_case : str = " ".join(lowercase_ )
        _snake_case : Tuple = self.big_tokenizer.encode_plus(lowercase_ , return_tensors="pt" )
        _snake_case : Tuple = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
        _snake_case : int = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        _snake_case : Union[str, Any] = encoded_sequence["input_ids"].shape
        _snake_case : List[str] = ReformerModel(lowercase_ )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**lowercase_ )
            model(**lowercase_ )

    @slow
    def UpperCamelCase ( self ):
        """Integration check against a pinned revision's expected encoding."""
        # fmt: off
        _snake_case : Union[str, Any] = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        _snake_case : Tuple = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase_ , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowercase_ , sequences=lowercase_ , )
"""simple docstring"""
def lowercase__(collection: list) -> list:
    """Top-down merge sort; returns the sorted list.

    Fixes from the original: the inner ``merge`` declared two parameters with
    the same name (a SyntaxError) and the recursion called an undefined
    ``merge_sort``; both restored here.
    """

    def merge(left: list, right: list) -> list:
        # Merge two sorted lists by repeatedly yielding the smaller head.
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    # A list of zero or one element is already sorted.
    if len(collection) <= 1:
        return collection
    mid_index = len(collection) // 2
    return merge(lowercase__(collection[:mid_index]), lowercase__(collection[mid_index:]))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read a comma-separated list of integers and print it sorted.
    # (The original reused one obfuscated name for every local and then read
    # the undefined names `user_input`/`unsorted`/`merge_sort` — fixed here.)
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(*lowercase__(unsorted), sep=""",""")
| 218 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowercase_ ( unittest.TestCase ):
    """Round-trip tests for CLIPSegProcessor (CLIP tokenizer + ViT image processor).

    NOTE(review): local-variable names in this file look machine-mangled —
    values are assigned to ``_snake_case`` placeholders while later lines read
    the original names (e.g. ``self.tmpdirname``, ``processor_slow``), so these
    tests cannot run as written until the assignment targets are restored.
    """

    def UpperCamelCase ( self ):
        # setUp: write a tiny CLIP vocab/merges pair plus a ViT image-processor
        # config into a fresh temporary directory used as a model fixture.
        _snake_case : Any = tempfile.mkdtemp()
        # fmt: off
        _snake_case : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        _snake_case : Dict = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        _snake_case : Dict = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        _snake_case : Optional[int] = {"unk_token": "<unk>"}
        _snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        _snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowercase_ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(lowercase_ ) )
        _snake_case : Any = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        _snake_case : Optional[Any] = os.path.join(self.tmpdirname , lowercase_ )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(lowercase_ , lowercase_ )

    def UpperCamelCase ( self , **lowercase_ ):
        # Slow tokenizer built from the temp fixture directory.
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )

    def UpperCamelCase ( self , **lowercase_ ):
        # Fast (Rust) tokenizer built from the temp fixture directory.
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )

    def UpperCamelCase ( self , **lowercase_ ):
        # Image processor built from the temp fixture directory.
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )

    def UpperCamelCase ( self ):
        # tearDown: remove the temp fixture directory.
        shutil.rmtree(self.tmpdirname )

    def UpperCamelCase ( self ):
        # One random (C, H, W) image converted to a PIL image.
        # NOTE(review): ``np.uinta`` looks like a mangled ``np.uint8`` — confirm.
        _snake_case : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        _snake_case : Union[str, Any] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def UpperCamelCase ( self ):
        # save_pretrained / from_pretrained round-trip for both slow and fast
        # tokenizer variants; vocab and image-processor config must survive.
        _snake_case : Tuple = self.get_tokenizer()
        _snake_case : Any = self.get_rust_tokenizer()
        _snake_case : Optional[Any] = self.get_image_processor()
        _snake_case : Any = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        processor_slow.save_pretrained(self.tmpdirname )
        _snake_case : Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
        _snake_case : List[Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        processor_fast.save_pretrained(self.tmpdirname )
        _snake_case : Optional[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
        self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , lowercase_ )
        self.assertIsInstance(processor_fast.image_processor , lowercase_ )

    def UpperCamelCase ( self ):
        # from_pretrained must honour extra kwargs for tokenizer and image processor.
        _snake_case : List[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        _snake_case : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        _snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
        _snake_case : Tuple = CLIPSegProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , lowercase_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowercase_ )

    def UpperCamelCase ( self ):
        # Image path through the processor must match the bare image processor.
        _snake_case : Union[str, Any] = self.get_image_processor()
        _snake_case : Any = self.get_tokenizer()
        _snake_case : int = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        _snake_case : Optional[int] = self.prepare_image_inputs()
        _snake_case : Optional[Any] = image_processor(lowercase_ , return_tensors="np" )
        _snake_case : str = processor(images=lowercase_ , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def UpperCamelCase ( self ):
        # Text path through the processor must match the bare tokenizer.
        _snake_case : Optional[Any] = self.get_image_processor()
        _snake_case : Any = self.get_tokenizer()
        _snake_case : Dict = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        _snake_case : List[str] = "lower newer"
        _snake_case : int = processor(text=lowercase_ )
        _snake_case : str = tokenizer(lowercase_ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def UpperCamelCase ( self ):
        # Joint text+image call yields all expected keys; an empty call raises.
        _snake_case : List[Any] = self.get_image_processor()
        _snake_case : int = self.get_tokenizer()
        _snake_case : Tuple = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        _snake_case : List[Any] = "lower newer"
        _snake_case : int = self.prepare_image_inputs()
        _snake_case : Dict = processor(text=lowercase_ , images=lowercase_ )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(lowercase_ ):
            processor()

    def UpperCamelCase ( self ):
        # images + visual_prompt yields conditional pixel values; empty call raises.
        _snake_case : Dict = self.get_image_processor()
        _snake_case : List[str] = self.get_tokenizer()
        _snake_case : Union[str, Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        _snake_case : Optional[int] = self.prepare_image_inputs()
        _snake_case : Dict = self.prepare_image_inputs()
        _snake_case : List[Any] = processor(images=lowercase_ , visual_prompt=lowercase_ )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(lowercase_ ):
            processor()

    def UpperCamelCase ( self ):
        # batch_decode is forwarded straight to the tokenizer.
        _snake_case : Dict = self.get_image_processor()
        _snake_case : List[Any] = self.get_tokenizer()
        _snake_case : str = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        _snake_case : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        _snake_case : Any = processor.batch_decode(lowercase_ )
        _snake_case : Any = tokenizer.batch_decode(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
'''simple docstring'''
import operator as op
def SCREAMING_SNAKE_CASE_(__A) -> int:
    """Evaluate a postfix (RPN) expression and return the integer result.

    ``__A`` is a list of tokens: decimal digit strings or one of ``^ * / + -``.
    Each step of the stack machine is printed as a trace table.
    Division truncates toward zero (``int(x / y)``).
    """
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(__A)))

    for x in __A:
        if x.isdigit():  # operand: push onto the stack
            stack.append(x)
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:  # operator: pop two operands, apply, push result
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ",
            )
    return int(stack[0])


# Descriptive alias used by the CLI entry point below.
solve = SCREAMING_SNAKE_CASE_

if __name__ == "__main__":
    lowerCamelCase_ = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', solve(lowerCamelCase_))
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case(path: str) -> str:
    """Infer the pipeline data format from a file path's extension.

    Returns "pipe" for an empty/None path (read from stdin), otherwise the
    matching entry of ``PipelineDataFormat.SUPPORTED_FORMATS``.

    Raises:
        Exception: if the extension matches no supported format.
    """
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise Exception(
        f"""Unable to determine file format from file extension {path}. """
        f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )


# Descriptive alias matching the call sites below.
try_infer_format_from_ext = snake_case
def snake_case(args):
    """Factory for the `run` CLI subcommand.

    Builds the pipeline and the input/output data-format reader from the
    parsed CLI ``args`` and returns a ready-to-execute RunCommand.
    """
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    # Infer the data format from the input file extension unless given explicitly.
    data_format = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=data_format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )


# Descriptive alias so the subcommand registration can reference this factory.
run_command_factory = snake_case
class lowercase_ ( __snake_case ):
    """CLI command that streams entries from a data-format reader through a
    pipeline and saves the outputs (text or binary)."""

    def __init__( self , nlp , reader ):
        # nlp: the instantiated pipeline; reader: a PipelineDataFormat instance.
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def UpperCamelCase ( parser ):
        """Register the `run` subcommand and its arguments on ``parser``."""
        run_parser = parser.add_parser("run" , help="Run a pipeline through the CLI" )
        run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
        run_parser.add_argument("--input" , type=str , help="Path to the file to use for inference" )
        run_parser.add_argument("--output" , type=str , help="Path to the file that will be used post to write results." )
        run_parser.add_argument("--model" , type=str , help="Name or path to the model to instantiate." )
        run_parser.add_argument("--config" , type=str , help="Name or path to the model's config to instantiate." )
        run_parser.add_argument(
            "--tokenizer" , type=str , help="Name of the tokenizer to use. (default: same as the model name)" )
        run_parser.add_argument(
            "--column" , type=str , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
        run_parser.add_argument(
            "--format" , type=str , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
        run_parser.add_argument(
            "--device" , type=int , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
        run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
        # Dispatch to the factory defined above when this subcommand is selected.
        run_parser.set_defaults(func=run_command_factory )

    def UpperCamelCase ( self ):
        """Run the pipeline over every entry supplied by the reader and save."""
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            # Multi-column readers supply keyword inputs, otherwise a single value.
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
        else:
            self._reader.save(outputs )
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__A = logging.get_logger(__name__)
class lowercase ( __snake_case):
    """Wrapper that runs several ControlNets in one pass and sums their
    residuals (diffusers' multi-ControlNet pattern)."""

    def __init__( self : Dict , controlnets : List[Any] ) -> int:
        super().__init__()
        self.nets = nn.ModuleList(controlnets )

    def _SCREAMING_SNAKE_CASE (
        self ,
        sample ,
        timestep ,
        encoder_hidden_states ,
        controlnet_cond ,
        conditioning_scale ,
        class_labels = None ,
        timestep_cond = None ,
        attention_mask = None ,
        cross_attention_kwargs = None ,
        guess_mode = False ,
        return_dict = True ,
    ):
        """Forward every sub-ControlNet with its own conditioning image/scale
        and accumulate the down/mid residual samples."""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples, mid_sample = controlnet(
                sample ,
                timestep ,
                encoder_hidden_states ,
                image ,
                scale ,
                class_labels ,
                timestep_cond ,
                attention_mask ,
                cross_attention_kwargs ,
                guess_mode ,
                return_dict ,
            )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def _SCREAMING_SNAKE_CASE ( self , save_directory , is_main_process = True , save_function = None , safe_serialization = False , variant = None , ):
        """Save each sub-ControlNet under ``save_directory``, ``_1``, ``_2``, …"""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + F"""_{idx}"""

    @classmethod
    def _SCREAMING_SNAKE_CASE ( cls , pretrained_model_path , **kwargs ):
        """Load sub-ControlNets from ``pretrained_model_path``, ``_1``, ``_2``, …
        until no further directory exists; raise if none was found."""
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + F"""_{idx}"""
        logger.info(F"""{len(controlnets )} controlnets loaded from {pretrained_model_path}.""" )
        if len(controlnets ) == 0:
            raise ValueError(
                F"""No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + "_0"}.""" )
        return cls(controlnets )
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
    """Wrapper that runs several ControlNets in one pass and sums their
    residuals (duplicate of the multi-ControlNet model in this repo)."""

    def __init__( self , controlnets ):
        super().__init__()
        self.nets = nn.ModuleList(controlnets )

    def UpperCamelCase (
        self ,
        sample ,
        timestep ,
        encoder_hidden_states ,
        controlnet_cond ,
        conditioning_scale ,
        class_labels = None ,
        timestep_cond = None ,
        attention_mask = None ,
        cross_attention_kwargs = None ,
        guess_mode = False ,
        return_dict = True ,
    ):
        """Forward every sub-ControlNet and accumulate down/mid residuals."""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples, mid_sample = controlnet(
                sample ,
                timestep ,
                encoder_hidden_states ,
                image ,
                scale ,
                class_labels ,
                timestep_cond ,
                attention_mask ,
                cross_attention_kwargs ,
                guess_mode ,
                return_dict ,
            )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def UpperCamelCase ( self , save_directory , is_main_process = True , save_function = None , safe_serialization = False , variant = None , ):
        """Save each sub-ControlNet under ``save_directory``, ``_1``, ``_2``, …"""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + f"""_{idx}"""

    @classmethod
    def UpperCamelCase ( cls , pretrained_model_path , **kwargs ):
        """Load sub-ControlNets from numbered sibling directories; raise if none."""
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + f"""_{idx}"""
        logger.info(f"""{len(controlnets )} controlnets loaded from {pretrained_model_path}.""" )
        if len(controlnets ) == 0:
            raise ValueError(
                f"""No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}.""" )
        return cls(controlnets )
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def UpperCAmelCase ( UpperCAmelCase="" )-> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
return os.path.join(__lowercase ,str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class snake_case ( unittest.TestCase ):
    """AgentAudio round-trip tests (tensor -> wav file -> tensor).

    NOTE(review): assignments target the ``SCREAMING_SNAKE_CASE_`` placeholder
    while later lines read names like ``agent_type`` — this looks machine-
    mangled and the original targets need restoring before these tests run.
    NOTE(review): ``torch.floataa`` looks like a mangled ``torch.float32`` —
    confirm against the upstream test file.
    """

    def _lowercase ( self : Union[str, Any] ) -> Any:
        """Tensor input: to_string writes a file whose samples match the tensor."""
        SCREAMING_SNAKE_CASE_ = torch.rand(12 , dtype=torch.floataa ) - 0.5
        SCREAMING_SNAKE_CASE_ = AgentAudio(lowercase_ )
        SCREAMING_SNAKE_CASE_ = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1e-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(lowercase_ ) )
        # Ensure that the file contains the same value as the original tensor
        SCREAMING_SNAKE_CASE_ = sf.read(lowercase_ )
        self.assertTrue(torch.allclose(lowercase_ , torch.tensor(lowercase_ ) , atol=1e-4 ) )

    def _lowercase ( self : Dict ) -> List[Any]:
        """Path input: a wav written with soundfile loads back through AgentAudio."""
        SCREAMING_SNAKE_CASE_ = torch.rand(12 , dtype=torch.floataa ) - 0.5
        SCREAMING_SNAKE_CASE_ = get_new_path(suffix='''.wav''' )
        sf.write(lowercase_ , lowercase_ , 16_000 )
        SCREAMING_SNAKE_CASE_ = AgentAudio(lowercase_ )
        self.assertTrue(torch.allclose(lowercase_ , agent_type.to_raw() , atol=1e-4 ) )
        self.assertEqual(agent_type.to_string() , lowercase_ )
@require_vision
@require_torch
class snake_case ( unittest.TestCase ):
    """AgentImage round-trip tests (tensor or PIL/path -> AgentImage).

    NOTE(review): same machine-mangled placeholder assignments as elsewhere in
    this file — later reads of ``agent_type``/``path``/``image`` have no
    matching assignment targets as written.
    """

    def _lowercase ( self : Any ) -> Optional[Any]:
        """Tensor input: to_string materialises a file; to_raw is a PIL image."""
        SCREAMING_SNAKE_CASE_ = torch.randint(0 , 256 , (64, 64, 3) )
        SCREAMING_SNAKE_CASE_ = AgentImage(lowercase_ )
        SCREAMING_SNAKE_CASE_ = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(lowercase_ , agent_type._tensor , atol=1e-4 ) )
        self.assertIsInstance(agent_type.to_raw() , Image.Image )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(lowercase_ ) )

    def _lowercase ( self : Union[str, Any] ) -> List[str]:
        """Image opened from a known path: to_string reuses that same file."""
        SCREAMING_SNAKE_CASE_ = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / "000000039769.png"
        SCREAMING_SNAKE_CASE_ = Image.open(lowercase_ )
        SCREAMING_SNAKE_CASE_ = AgentImage(lowercase_ )
        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(lowercase_ ) )

    def _lowercase ( self : Optional[int] ) -> Any:
        """Image without a path association: to_string writes a new file."""
        SCREAMING_SNAKE_CASE_ = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / "000000039769.png"
        SCREAMING_SNAKE_CASE_ = Image.open(lowercase_ )
        SCREAMING_SNAKE_CASE_ = AgentImage(lowercase_ )
        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(lowercase_ ) )
class snake_case ( unittest.TestCase ):
    """AgentText tests: a plain string round-trips through to_string/to_raw.

    NOTE(review): placeholder assignments (``SCREAMING_SNAKE_CASE_``) look
    machine-mangled; the final assertion compares the placeholder to itself.
    """

    def _lowercase ( self : List[str] ) -> str:
        """String in, same string back from both to_string and to_raw."""
        SCREAMING_SNAKE_CASE_ = "Hey!"
        SCREAMING_SNAKE_CASE_ = AgentText(lowercase_ )
        self.assertEqual(lowercase_ , agent_type.to_string() )
        self.assertEqual(lowercase_ , agent_type.to_raw() )
        self.assertEqual(lowercase_ , lowercase_ )
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( __snake_case ):
    """Processor pairing a CLIP image processor with an XLM-Roberta tokenizer.

    NOTE(review): the three class attributes were all mangled to one name;
    restored to the ProcessorMixin contract (attributes / image_processor_class
    / tokenizer_class) — verify against the upstream definition.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        # Fall back to the deprecated kwarg for backward compatibility.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Encode text and/or images; at least one of the two must be given."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # Merge pixel values into the tokenizer encoding.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def UpperCamelCase ( self , *args , **kwargs ):
        # Forwarded verbatim to the tokenizer.
        return self.tokenizer.batch_decode(*args , **kwargs )

    def UpperCamelCase ( self , *args , **kwargs ):
        # Forwarded verbatim to the tokenizer.
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def UpperCamelCase ( self ):
        # Union of the two sub-components' input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
_A: List[Any] = logging.get_logger(__name__)
def _lowerCAmelCase ( base_model_name , hf_config , downstream_dict )-> Dict:
    """Build UniSpeechSatForSequenceClassification and load the S3PRL
    downstream head weights into it.

    NOTE(review): attribute targets restored from the upstream S3PRL
    conversion script — verify against transformers.
    """
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


# Descriptive alias used by the conversion entry point below.
convert_classification = _lowerCAmelCase
def _lowerCAmelCase ( base_model_name , hf_config , downstream_dict )-> Optional[int]:
    """Build UniSpeechSatForAudioFrameClassification and load the S3PRL
    downstream linear head into its classifier.

    NOTE(review): attribute targets restored from the upstream S3PRL
    conversion script — verify against transformers.
    """
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


# Descriptive alias used by the conversion entry point below.
convert_diarization = _lowerCAmelCase
def _lowerCAmelCase ( base_model_name , hf_config , downstream_dict )-> Tuple:
    """Build UniSpeechSatForXVector and load the S3PRL speaker-verification
    downstream weights (connector, TDNN stack, utterance-level layers).

    NOTE(review): attribute targets restored from the upstream S3PRL
    conversion script — verify against transformers.
    """
    model = UniSpeechSatForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


# Descriptive alias used by the conversion entry point below.
convert_xvector = _lowerCAmelCase
@torch.no_grad()
def _lowerCAmelCase ( base_model_name , config_path , checkpoint_path , model_dump_path )-> Any:
    """Convert an S3PRL UniSpeechSat checkpoint into a transformers checkpoint
    and save model + feature extractor under ``model_dump_path``.

    The downstream head type is selected from the config's architecture name.
    """
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path )
    # NOTE(review): flag values restored from the upstream script — verify.
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith('ForSequenceClassification' ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('ForAudioFrameClassification' ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('ForXVector' ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )


# Descriptive alias used by the CLI entry point below.
convert_saprl_checkpoint = _lowerCAmelCase
if __name__ == "__main__":
_A: List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
_A: Union[str, Any] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
from __future__ import annotations
def snake_case (voltage , current , resistance ) -> dict[str, float]:
    '''Apply Ohm's law: exactly one of the three quantities must be passed
    as 0; that unknown is computed from the other two and returned as a
    single-entry dict keyed by its name.

    Raises:
        ValueError: if not exactly one argument is 0, or resistance < 0.
    '''
    if (voltage, current, resistance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if resistance < 0:
        raise ValueError("Resistance cannot be negative" )
    if voltage == 0:
        return {"voltage": float(current * resistance )}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        # Unreachable: the count(0) guard above ensures one branch matched.
        raise ValueError("Exactly one argument must be 0" )


# Descriptive alias for external callers.
ohms_law = snake_case

if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar form into Cartesian components [Fx, Fy].

    ``angle`` is interpreted in degrees unless ``radian_mode`` is True.
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces, location, eps: float = 10**-1) -> bool:
    """Check rotational equilibrium: True when the net moment of ``forces``
    (an (n, 2) array) applied at ``location`` (also (n, 2)) about the origin
    is below ``eps`` in magnitude.
    """
    # z-component of the 2-D cross product location x force, per pair.
    # (Equivalent to numpy.cross on 2-vectors, whose 2-D support is deprecated.)
    moments = location[:, 0] * forces[:, 1] - location[:, 1] * forces[:, 0]
    return abs(sum(moments)) < eps


# Alias matching the obfuscated name the original file bound last.
__UpperCamelCase = in_static_equilibrium

if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def snake_case (*objects ) -> list:
    '''Drop references to the given objects and free the accelerator cache.

    Returns a list of ``None`` placeholders (one per argument) so callers can
    rebind their variables, e.g. ``a, b = release_memory(a, b)``.
    '''
    if not isinstance(objects , list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        # Overwrite the held reference so the object becomes collectable.
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


# Descriptive alias for external callers.
release_memory = snake_case
def snake_case (exception ) -> bool:
    '''Return True when ``exception`` is a single-message RuntimeError that
    describes an out-of-memory condition (CUDA, cuDNN workspace, or the CPU
    allocator), i.e. retrying with a smaller batch size may help.'''
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
    return False


# Descriptive alias referenced by the batch-size decorator below.
should_reduce_batch_size = snake_case
def snake_case (function = None , starting_batch_size = 128 ):
    '''Decorator that retries ``function`` with a halved batch size whenever it
    raises an out-of-memory error, starting from ``starting_batch_size``.

    ``function`` must take the batch size as its first argument; the decorator
    supplies it, so callers must not pass it themselves.

    Raises:
        RuntimeError: if the batch size reaches zero without success.
    '''
    if function is None:
        # Used as ``@decorator(starting_batch_size=...)``: return a partially
        # applied version awaiting the function.
        return functools.partial(snake_case , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size

    def decorator(*args , **kwargs ):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
        # Guard against user error
        if len(params ) < (len(args ) + 1):
            arg_str = ", ".join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
            raise TypeError(
                F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
                F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero." )
            try:
                return function(batch_size , *args , **kwargs )
            except Exception as e:
                if should_reduce_batch_size(e ):
                    # OOM: clear caches and retry with half the batch size.
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator


# Descriptive alias for external callers.
find_executable_batch_size = snake_case
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def load_weights(checkpoint, hf_model, config):
    """Copy HiFi-GAN generator weights from an original `checkpoint` dict into `hf_model`.

    Weight norm is applied first so the `weight_g` / `weight_v` parameters exist,
    and removed again at the end so the merged weights are what gets saved.

    Args:
        checkpoint: state dict of the original generator (keys like `input_conv.*`).
        hf_model: a `SpeechTaHifiGan` instance to populate in place.
        config: the matching `SpeechTaHifiGanConfig` (drives the loop extents).
    """
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"""upsamples.{i}.1.weight_g"""]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"""upsamples.{i}.1.weight_v"""]
        hf_model.upsampler[i].bias.data = checkpoint[f"""upsamples.{i}.1.bias"""]

    # One residual block per (upsample stage, kernel size) pair.
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original SpeechT5 HiFi-GAN checkpoint to the HF format and save it.

    Args:
        checkpoint_path: path to the original torch checkpoint.
        stats_path: path to the `stats.npy` file holding [mean, scale] rows.
        pytorch_dump_folder_path: output directory for the converted model.
        config_path: optional path to an existing HF `config.json`; defaults
            to a fresh `SpeechTaHifiGanConfig` when omitted.
        repo_id: optional Hub repo to push the converted model to.
    """
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["""model"""]["""generator"""], model, config)

    # stats.npy stores the spectrogram normalization statistics: row 0 = mean, row 1 = scale.
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("""Pushing to the hub...""")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # NOTE(review): the parser/args are assigned to `_lowerCAmelCase` but read back
    # as `parser` and `args` below — obfuscation damage; verify names before running.
    _lowerCAmelCase = argparse.ArgumentParser()
    parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
    parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
    )
    _lowerCAmelCase = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
# Bacon cipher code table: every lowercase letter maps to a unique five-character
# A/B group; the space maps to itself so word boundaries survive encoding.
encode_dict = {
    'a': 'AAAAA',
    'b': 'AAAAB',
    'c': 'AAABA',
    'd': 'AAABB',
    'e': 'AABAA',
    'f': 'AABAB',
    'g': 'AABBA',
    'h': 'AABBB',
    'i': 'ABAAA',
    'j': 'BBBAA',
    'k': 'ABAAB',
    'l': 'ABABA',
    'm': 'ABABB',
    'n': 'ABBAA',
    'o': 'ABBAB',
    'p': 'ABBBA',
    'q': 'ABBBB',
    'r': 'BAAAA',
    's': 'BAAAB',
    't': 'BAABA',
    'u': 'BAABB',
    'v': 'BBBAB',
    'w': 'BABAA',
    'x': 'BABAB',
    'y': 'BABBA',
    'z': 'BABBB',
    ' ': ' ',
}
# Inverse table used by decode(); valid because every code above is unique.
decode_dict = {value: key for key, value in encode_dict.items()}
def snake_case (__lowercase ) -> str:
    """Encode `__lowercase` (letters and spaces, case-insensitive) with the Bacon cipher.

    Raises:
        Exception: if the input contains a character that is neither a letter nor a space.
    """
    encoded = ""
    for letter in __lowercase.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded
def snake_case (__lowercase ) -> str:
    """Decode a Bacon-cipher string of space-separated 'A'/'B' groups back to text.

    Raises:
        Exception: if the input contains characters other than 'A', 'B' and spaces.
    """
    if set(__lowercase) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in __lowercase.split():
        # Consume the word five characters (one letter) at a time.
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod

    testmod()
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
A_ : str =logging.get_logger(__name__)
# NOTE(review): every constant below is bound to the same name `A_`, yet the
# decorators later reference `_CONFIG_FOR_DOC`, `_CHECKPOINT_FOR_DOC`,
# `_EXPECTED_OUTPUT_SHAPE`, etc. — obfuscation damage; verify against upstream.
# General docstring
A_ : List[Any] ='RegNetConfig'
# Base docstring
A_ : Tuple ='facebook/regnet-y-040'
A_ : Tuple =[1, 1088, 7, 7]
# Image classification docstring
A_ : Optional[int] ='facebook/regnet-y-040'
A_ : Union[str, Any] ='tabby, tabby cat'
A_ : Tuple =[
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowercase_ ( tf.keras.layers.Layer):
    """Convolution block: zero padding -> Conv2D -> BatchNorm -> activation.

    NOTE(review): the body reads `lowercase_`, `kernel_size` and `activation`
    while every parameter is named `_UpperCAmelCase` (duplicated) — obfuscation
    damage; verify against upstream `TFRegNetConvLayer`.
    """
    def __init__( self , _UpperCAmelCase , _UpperCAmelCase = 3 , _UpperCAmelCase = 1 , _UpperCAmelCase = 1 , _UpperCAmelCase = "relu" , **_UpperCAmelCase , ):
        """Build the padding, convolution, normalization and activation sub-layers."""
        super().__init__(**lowercase_ )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        a_ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
        a_ = tf.keras.layers.ConvaD(
            filters=lowercase_ , kernel_size=lowercase_ , strides=lowercase_ , padding="""VALID""" , groups=lowercase_ , use_bias=lowercase_ , name="""convolution""" , )
        a_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="""normalization""" )
        a_ = ACTaFN[activation] if activation is not None else tf.identity
    def lowercase__ ( self , _UpperCAmelCase ):
        """Apply pad -> conv -> norm -> activation to the input tensor."""
        a_ = self.convolution(self.padding(lowercase_ ) )
        a_ = self.normalization(lowercase_ )
        a_ = self.activation(lowercase_ )
        return hidden_state
class lowercase_ ( tf.keras.layers.Layer):
    """Stem of the network: validates channel count and applies the first conv block.

    NOTE(review): `lowercase_` / `a_` names are obfuscation-damaged; verify
    against upstream `TFRegNetEmbeddings`.
    """
    def __init__( self , _UpperCAmelCase , **_UpperCAmelCase ):
        """Record the expected channel count and build the stem conv layer."""
        super().__init__(**lowercase_ )
        a_ = config.num_channels
        a_ = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="""embedder""" , )
    def lowercase__ ( self , _UpperCAmelCase ):
        """Check channels, convert NCHW input to NHWC, and embed it."""
        a_ = shape_list(lowercase_ )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                """Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        a_ = tf.transpose(lowercase_ , perm=(0, 2, 3, 1) )
        a_ = self.embedder(lowercase_ )
        return hidden_state
class lowercase_ ( tf.keras.layers.Layer):
    """Residual shortcut projection: strided 1x1 convolution followed by batch norm.

    NOTE(review): `lowercase_` names are obfuscation-damaged; verify against
    upstream `TFRegNetShortCut`.
    """
    def __init__( self , _UpperCAmelCase , _UpperCAmelCase = 2 , **_UpperCAmelCase ):
        """Build the projection conv and its normalization."""
        super().__init__(**lowercase_ )
        a_ = tf.keras.layers.ConvaD(
            filters=lowercase_ , kernel_size=1 , strides=lowercase_ , use_bias=lowercase_ , name="""convolution""" )
        a_ = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name="""normalization""" )
    def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase = False ):
        """Project the input and normalize it (training flag forwarded to BN)."""
        return self.normalization(self.convolution(lowercase_ ) , training=lowercase_ )
class lowercase_ ( tf.keras.layers.Layer):
    """Squeeze-and-excitation gate: global pool, two 1x1 convs (relu then sigmoid),
    then channel-wise rescaling of the input.

    NOTE(review): `lowercase_` names are obfuscation-damaged; verify against
    upstream `TFRegNetSELayer`.
    """
    def __init__( self , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
        """Build the pooler and the two attention convolutions."""
        super().__init__(**lowercase_ )
        a_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase_ , name="""pooler""" )
        a_ = [
            tf.keras.layers.ConvaD(filters=lowercase_ , kernel_size=1 , activation="""relu""" , name="""attention.0""" ),
            tf.keras.layers.ConvaD(filters=lowercase_ , kernel_size=1 , activation="""sigmoid""" , name="""attention.2""" ),
        ]
    def lowercase__ ( self , _UpperCAmelCase ):
        """Compute the channel attention and rescale the input feature map."""
        a_ = self.pooler(lowercase_ )
        for layer_module in self.attention:
            a_ = layer_module(lowercase_ )
        a_ = hidden_state * pooled
        return hidden_state
class lowercase_ ( tf.keras.layers.Layer):
    """RegNet "X" residual block: 1x1 reduce -> grouped 3x3 -> 1x1 expand, plus shortcut.

    NOTE(review): `lowercase_` names are obfuscation-damaged; verify against
    upstream `TFRegNetXLayer`.
    """
    def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ):
        """Build shortcut (projection when shape changes, identity otherwise) and the conv stack."""
        super().__init__(**lowercase_ )
        a_ = in_channels != out_channels or stride != 1
        a_ = max(1 , out_channels // config.groups_width )
        a_ = (
            TFRegNetShortCut(lowercase_ , stride=lowercase_ , name="""shortcut""" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        a_ = [
            TFRegNetConvLayer(lowercase_ , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
            TFRegNetConvLayer(
                lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act , name="""layer.1""" ),
            TFRegNetConvLayer(lowercase_ , kernel_size=1 , activation=lowercase_ , name="""layer.2""" ),
        ]
        a_ = ACTaFN[config.hidden_act]
    def lowercase__ ( self , _UpperCAmelCase ):
        """Run the conv stack, add the (possibly projected) residual, and activate."""
        a_ = hidden_state
        for layer_module in self.layers:
            a_ = layer_module(lowercase_ )
        a_ = self.shortcut(lowercase_ )
        hidden_state += residual
        a_ = self.activation(lowercase_ )
        return hidden_state
class lowercase_ ( tf.keras.layers.Layer):
    """RegNet "Y" residual block: like the X block but with a squeeze-excitation
    layer inserted before the final 1x1 convolution.

    NOTE(review): `lowercase_` names are obfuscation-damaged; verify against
    upstream `TFRegNetYLayer`.
    """
    def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 1 , **_UpperCAmelCase ):
        """Build shortcut and the conv + SE stack."""
        super().__init__(**lowercase_ )
        a_ = in_channels != out_channels or stride != 1
        a_ = max(1 , out_channels // config.groups_width )
        a_ = (
            TFRegNetShortCut(lowercase_ , stride=lowercase_ , name="""shortcut""" )
            if should_apply_shortcut
            else tf.keras.layers.Activation("""linear""" , name="""shortcut""" )
        )
        a_ = [
            TFRegNetConvLayer(lowercase_ , kernel_size=1 , activation=config.hidden_act , name="""layer.0""" ),
            TFRegNetConvLayer(
                lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act , name="""layer.1""" ),
            TFRegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) , name="""layer.2""" ),
            TFRegNetConvLayer(lowercase_ , kernel_size=1 , activation=lowercase_ , name="""layer.3""" ),
        ]
        a_ = ACTaFN[config.hidden_act]
    def lowercase__ ( self , _UpperCAmelCase ):
        """Run the conv/SE stack, add the residual, and activate."""
        a_ = hidden_state
        for layer_module in self.layers:
            a_ = layer_module(lowercase_ )
        a_ = self.shortcut(lowercase_ )
        hidden_state += residual
        a_ = self.activation(lowercase_ )
        return hidden_state
class lowercase_ ( tf.keras.layers.Layer):
    """A stage of `depth` X or Y layers; the first layer downsamples with stride 2.

    NOTE(review): `lowercase_` names are obfuscation-damaged; verify against
    upstream `TFRegNetStage`.
    """
    def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , **_UpperCAmelCase ):
        """Pick the layer type from the config and build the stage's layer list."""
        super().__init__(**lowercase_ )
        a_ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        a_ = [
            # downsampling is done in the first layer with stride of 2
            layer(lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , name="""layers.0""" ),
            *[layer(lowercase_ , lowercase_ , lowercase_ , name=f"layers.{i+1}" ) for i in range(depth - 1 )],
        ]
    def lowercase__ ( self , _UpperCAmelCase ):
        """Apply every layer in the stage in order."""
        for layer_module in self.layers:
            a_ = layer_module(lowercase_ )
        return hidden_state
class lowercase_ ( tf.keras.layers.Layer):
    """Full RegNet trunk: a sequence of stages, optionally collecting hidden states.

    NOTE(review): `lowercase_` names are obfuscation-damaged; verify against
    upstream `TFRegNetEncoder`.
    """
    def __init__( self , _UpperCAmelCase , **_UpperCAmelCase ):
        """Build one stage per entry in `config.hidden_sizes`."""
        super().__init__(**lowercase_ )
        a_ = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="""stages.0""" , ) )
        a_ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase_ , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ , name=f"stages.{i+1}" ) )
    def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = True ):
        """Run every stage; optionally accumulate per-stage hidden states."""
        a_ = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                a_ = hidden_states + (hidden_state,)
            a_ = stage_module(lowercase_ )
            if output_hidden_states:
                a_ = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
@keras_serializable
class lowercase_ ( tf.keras.layers.Layer):
    """Core RegNet layer: embedder -> encoder -> global pooler, NHWC internally,
    NCHW at the interface.

    NOTE(review): `lowercase_` names are obfuscation-damaged; verify against
    upstream `TFRegNetMainLayer`.
    """
    # Config class consumed by `keras_serializable`.
    snake_case_ = RegNetConfig
    def __init__( self , _UpperCAmelCase , **_UpperCAmelCase ):
        """Build embedder, encoder and pooler sub-layers."""
        super().__init__(**lowercase_ )
        a_ = config
        a_ = TFRegNetEmbeddings(lowercase_ , name="""embedder""" )
        a_ = TFRegNetEncoder(lowercase_ , name="""encoder""" )
        a_ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase_ , name="""pooler""" )
    @unpack_inputs
    def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ):
        """Run the trunk and return (last_hidden_state, pooled_output[, hidden_states])."""
        a_ = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        a_ = return_dict if return_dict is not None else self.config.use_return_dict
        a_ = self.embedder(lowercase_ , training=lowercase_ )
        a_ = self.encoder(
            lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ , training=lowercase_ )
        a_ = encoder_outputs[0]
        a_ = self.pooler(lowercase_ )
        # Change to NCHW output format have uniformity in the modules
        a_ = tf.transpose(lowercase_ , perm=(0, 3, 1, 2) )
        a_ = tf.transpose(lowercase_ , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            a_ = tuple([tf.transpose(lowercase_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowercase_ ( __snake_case):
    """Base class wiring RegNet into the TF pretrained-model machinery:
    config class, model prefix, and the main input name/signature.
    """
    snake_case_ = RegNetConfig
    snake_case_ = '''regnet'''
    snake_case_ = '''pixel_values'''
    @property
    def lowercase__ ( self ):
        """Serving signature: a float NCHW tensor of 224x224 pixel values."""
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
# Docstring fragments injected by the `add_start_docstrings*` decorators below
# (both bound to `A_` — obfuscation damage; upstream names are
# REGNET_START_DOCSTRING / REGNET_INPUTS_DOCSTRING).
A_ : Union[str, Any] =R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
A_ : List[str] =R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''' ,__snake_case ,)
class lowercase_ ( __snake_case):
    """Headless RegNet model: delegates to the main layer and normalizes outputs.

    NOTE(review): `lowercase_` names are obfuscation-damaged; verify against
    upstream `TFRegNetModel`.
    """
    def __init__( self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ):
        """Instantiate the shared main layer under the name "regnet"."""
        super().__init__(lowercase_ , *lowercase_ , **lowercase_ )
        a_ = TFRegNetMainLayer(lowercase_ , name="""regnet""" )
    @unpack_inputs
    @add_start_docstrings_to_model_forward(lowercase_ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ):
        """Forward pass; returns a tuple or a pooling model-output object."""
        a_ = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        a_ = return_dict if return_dict is not None else self.config.use_return_dict
        a_ = self.regnet(
            pixel_values=lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ , training=lowercase_ , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    '''\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ''' ,__snake_case ,)
class lowercase_ ( __snake_case ,__snake_case):
    """RegNet trunk plus a flatten + dense classification head over pooled features.

    NOTE(review): `lowercase_` names are obfuscation-damaged; verify against
    upstream `TFRegNetForImageClassification`.
    """
    def __init__( self , _UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ):
        """Build the trunk and a two-step classifier (Flatten, then Dense or identity)."""
        super().__init__(lowercase_ , *lowercase_ , **lowercase_ )
        a_ = config.num_labels
        a_ = TFRegNetMainLayer(lowercase_ , name="""regnet""" )
        # classification head
        a_ = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
        ]
    @unpack_inputs
    @add_start_docstrings_to_model_forward(lowercase_ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def lowercase__ ( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=False , ):
        """Forward pass; computes logits from pooled features and optional label loss."""
        a_ = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        a_ = return_dict if return_dict is not None else self.config.use_return_dict
        a_ = self.regnet(
            lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ , training=lowercase_ )
        a_ = outputs.pooler_output if return_dict else outputs[1]
        a_ = self.classifier[0](lowercase_ )
        a_ = self.classifier[1](lowercase_ )
        a_ = None if labels is None else self.hf_compute_loss(labels=lowercase_ , logits=lowercase_ )
        if not return_dict:
            a_ = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase_ ( unittest.TestCase ):
    """Slow integration tests for the Flax Stable Diffusion 2 pipeline.

    NOTE(review): all three methods share the name `UpperCamelCase` (they would
    shadow each other), and locals are assigned to `_snake_case` but read back
    under their real names (`sd_pipe`, `images`, ...) — obfuscation damage;
    verify against the upstream diffusers test file.
    """
    def UpperCamelCase ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def UpperCamelCase ( self ):
        # Full text-to-image run with the default scheduler; checks output shape
        # and a small pixel slice against reference values.
        _snake_case ,_snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
        _snake_case : List[Any] = "A painting of a squirrel eating a burger"
        _snake_case : Union[str, Any] = jax.device_count()
        _snake_case : List[Any] = num_samples * [prompt]
        _snake_case : Tuple = sd_pipe.prepare_inputs(lowercase_ )
        _snake_case : str = replicate(lowercase_ )
        _snake_case : Dict = shard(lowercase_ )
        _snake_case : List[Any] = jax.random.PRNGKey(0 )
        _snake_case : List[Any] = jax.random.split(lowercase_ , jax.device_count() )
        _snake_case : Tuple = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        _snake_case : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        _snake_case : str = images[0, 253:256, 253:256, -1]
        _snake_case : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        _snake_case : Optional[Any] = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
    def UpperCamelCase ( self ):
        # Same run but with an explicitly constructed DPM-Solver multistep scheduler.
        _snake_case : Optional[Any] = "stabilityai/stable-diffusion-2"
        _snake_case ,_snake_case : List[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase_ , subfolder="scheduler" )
        _snake_case ,_snake_case : int = FlaxStableDiffusionPipeline.from_pretrained(
            lowercase_ , scheduler=lowercase_ , revision="bf16" , dtype=jnp.bfloataa , )
        _snake_case : str = scheduler_params
        _snake_case : Dict = "A painting of a squirrel eating a burger"
        _snake_case : Dict = jax.device_count()
        _snake_case : Optional[int] = num_samples * [prompt]
        _snake_case : List[str] = sd_pipe.prepare_inputs(lowercase_ )
        _snake_case : Optional[int] = replicate(lowercase_ )
        _snake_case : Union[str, Any] = shard(lowercase_ )
        _snake_case : List[Any] = jax.random.PRNGKey(0 )
        _snake_case : Union[str, Any] = jax.random.split(lowercase_ , jax.device_count() )
        _snake_case : str = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        _snake_case : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        _snake_case : List[str] = images[0, 253:256, 253:256, -1]
        _snake_case : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        _snake_case : Dict = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
# NOTE(review): all four constants below are bound to `_UpperCAmelCase`, yet the
# class later reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — obfuscation damage; verify upstream.
# Names of the files that make up a serialized BlenderbotSmall tokenizer.
_UpperCAmelCase : List[str] = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}
# Hosted locations of vocab/merges/config for each pretrained checkpoint.
_UpperCAmelCase : List[str] = {
    'vocab_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
    },
    'merges_file': {
        'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
    },
    'tokenizer_config_file': {
        'facebook/blenderbot_small-90M': (
            'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
        )
    },
}
# Maximum model input length (in tokens) per pretrained checkpoint.
_UpperCAmelCase : Union[str, Any] = {
    'facebook/blenderbot_small-90M': 512,
}
class UpperCAmelCase ( __snake_case ):
    """Fast BlenderbotSmall tokenizer backed by a byte-level BPE tokenizer.

    NOTE(review): method bodies read `lowercase_` / `token_ids_a` / `add_prefix_space`
    while parameters are named `_snake_case` — obfuscation damage; verify against
    upstream `BlenderbotSmallTokenizerFast`.
    """
    A__ : Tuple = VOCAB_FILES_NAMES
    A__ : str = PRETRAINED_VOCAB_FILES_MAP
    A__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    A__ : int = BlenderbotSmallTokenizer
    def __init__( self , _snake_case=None , _snake_case=None , _snake_case="<|endoftext|>" , _snake_case="<|endoftext|>" , _snake_case="<|endoftext|>" , _snake_case=False , _snake_case=True , **_snake_case , ) -> List[str]:
        """Construct the byte-level BPE backend and record `add_prefix_space`."""
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=lowercase_ , merges=lowercase_ , add_prefix_space=lowercase_ , trim_offsets=lowercase_ , ) , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , **lowercase_ , )
        _UpperCamelCase : int = add_prefix_space
    def _lowercase ( self , _snake_case , _snake_case=None ) -> List[str]:
        """Wrap sequence(s) in BOS/EOS special tokens for model input."""
        _UpperCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
    def _lowercase ( self , _snake_case , _snake_case = None ) -> List[Any]:
        """Return the all-zeros token-type-id mask for one or two sequences."""
        _UpperCamelCase : Optional[int] = [self.sep_token_id]
        _UpperCamelCase : Optional[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 683 | from manim import *
class lowercase_ ( __snake_case ):
    """Manim scene animating how a sharded checkpoint is loaded into CPU memory.

    NOTE(review): positional arguments and several locals reference `lowercase_`,
    `mem`, `fill`, `cpu_targs`, ... that are never defined under those names —
    obfuscation damage; verify against the upstream accelerate doc animation.
    """
    def UpperCamelCase ( self ):
        """Lay out CPU/GPU/Model/checkpoint groups, then animate shard loading."""
        # Unit memory cell and its inner fill rectangle.
        _snake_case : Tuple = Rectangle(height=0.5 , width=0.5 )
        _snake_case : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two columns of six cells.
        _snake_case : List[str] = [mem.copy() for i in range(6 )]
        _snake_case : Any = [mem.copy() for i in range(6 )]
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : str = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : int = Text("CPU" , font_size=24 )
        _snake_case : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(lowercase_ )
        # GPU: a single row of four cells.
        _snake_case : int = [mem.copy() for i in range(4 )]
        _snake_case : Dict = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : str = Text("GPU" , font_size=24 )
        _snake_case : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        gpu.move_to([-1, -1, 0] )
        self.add(lowercase_ )
        # Model: a row of six cells.
        _snake_case : Any = [mem.copy() for i in range(6 )]
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Dict = Text("Model" , font_size=24 )
        _snake_case : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        model.move_to([3, -1.0, 0] )
        self.add(lowercase_ )
        # Small target markers placed next to the CPU cells.
        _snake_case : str = []
        for i, rect in enumerate(lowercase_ ):
            rect.set_stroke(lowercase_ )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            _snake_case : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
            self.add(lowercase_ )
            cpu_targs.append(lowercase_ )
        # Loaded checkpoint: a row of six cells with a caption.
        _snake_case : List[Any] = [mem.copy() for i in range(6 )]
        _snake_case : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Optional[Any] = Text("Loaded Checkpoint" , font_size=24 )
        _snake_case : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        # Legend in the top-left corner.
        _snake_case : Optional[int] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        _snake_case : Optional[Any] = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(lowercase_ , lowercase_ )
        _snake_case : Union[str, Any] = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        # Caption describing the step being animated.
        _snake_case : List[Any] = MarkupText(
            f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(lowercase_ ) , Write(lowercase_ ) )
        self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
        # Animate each checkpoint cell growing, then moving into a CPU cell.
        _snake_case : int = []
        _snake_case : str = []
        for i, rect in enumerate(lowercase_ ):
            _snake_case : Dict = fill.copy().set_fill(lowercase_ , opacity=0.7 )
            target.move_to(lowercase_ )
            first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
            _snake_case : Dict = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
        self.play(*lowercase_ )
        self.play(*lowercase_ )
        self.wait()
"""Configuration definition for the NLLB-MoE model."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Optional[int] = logging.get_logger(__name__)
# Map of canonical checkpoint names to their hosted config files.
A : Any = {
    'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class _UpperCamelCase ( __snake_case ):
    """Configuration class for NLLB-MoE (`model_type = "nllb-moe"`).

    Stores encoder/decoder sizes, dropout rates, and Mixture-of-Experts routing
    hyper-parameters (expert count/capacity, router dtype, aux-loss weights).

    NOTE(review): every `__init__` parameter is named `__a` (duplicates = a
    SyntaxError) and every assignment target is `__lowerCAmelCase`, while the
    right-hand sides read the real names (`vocab_size`, `d_model`, ...) —
    obfuscation damage; restore the upstream signature before use.
    """
    __UpperCAmelCase : List[Any] ="""nllb-moe"""
    __UpperCAmelCase : List[Any] =["""past_key_values"""]
    __UpperCAmelCase : int ={"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , __a=12_81_12 , __a=10_24 , __a=12 , __a=40_96 , __a=16 , __a=12 , __a=40_96 , __a=16 , __a=0.0_5 , __a=0.0_5 , __a=True , __a=True , __a="relu" , __a=10_24 , __a=0.1 , __a=0.1 , __a=0.0 , __a=0.0_2 , __a=2 , __a=True , __a=False , __a="float32" , __a=False , __a=1_28 , __a=64 , __a=4 , __a=4 , __a=0.0_0_1 , __a=0.0_0_1 , __a="all" , __a=False , __a=False , __a=1.0 , __a=0.2 , __a=1 , __a=0 , __a=2 , __a=False , **__a , ):
        """Record all model/routing hyper-parameters and forward special-token ids."""
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = d_model
        __lowerCAmelCase = encoder_ffn_dim
        __lowerCAmelCase = encoder_layers
        __lowerCAmelCase = encoder_attention_heads
        __lowerCAmelCase = decoder_ffn_dim
        __lowerCAmelCase = decoder_layers
        __lowerCAmelCase = decoder_attention_heads
        __lowerCAmelCase = dropout
        __lowerCAmelCase = attention_dropout
        __lowerCAmelCase = activation_dropout
        __lowerCAmelCase = activation_function
        __lowerCAmelCase = init_std
        __lowerCAmelCase = encoder_layerdrop
        __lowerCAmelCase = decoder_layerdrop
        __lowerCAmelCase = use_cache
        __lowerCAmelCase = encoder_layers
        __lowerCAmelCase = scale_embedding  # scale factor will be sqrt(d_model) if True
        __lowerCAmelCase = router_z_loss_coef
        __lowerCAmelCase = router_aux_loss_coef
        __lowerCAmelCase = decoder_sparse_step
        __lowerCAmelCase = encoder_sparse_step
        __lowerCAmelCase = num_experts
        __lowerCAmelCase = expert_capacity
        __lowerCAmelCase = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
        __lowerCAmelCase = router_dtype
        __lowerCAmelCase = router_ignore_padding_tokens
        __lowerCAmelCase = batch_prioritized_routing
        __lowerCAmelCase = second_expert_policy
        __lowerCAmelCase = normalize_router_prob_before_dropping
        __lowerCAmelCase = moe_eval_capacity_token_fraction
        __lowerCAmelCase = moe_token_dropout
        __lowerCAmelCase = output_router_logits
        super().__init__(
            pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
| 636 | import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
    """Names of the supported learning-rate schedules.

    NOTE(review): every member below is assigned to the same identifier
    `_lowerCamelCase`, so only the last binding would survive — obfuscation
    damage; upstream members are LINEAR, COSINE, COSINE_WITH_RESTARTS,
    POLYNOMIAL, CONSTANT, CONSTANT_WITH_WARMUP, PIECEWISE_CONSTANT.
    """
    _lowerCamelCase = 'linear'
    _lowerCamelCase = 'cosine'
    _lowerCamelCase = 'cosine_with_restarts'
    _lowerCamelCase = 'polynomial'
    _lowerCamelCase = 'constant'
    _lowerCamelCase = 'constant_with_warmup'
    _lowerCamelCase = 'piecewise_constant'
def snake_case (optimizer , last_epoch = -1 ):
    """Create a schedule that keeps the optimizer's learning rate constant.

    Args:
        optimizer: the optimizer whose learning rate is scheduled.
        last_epoch: index of the last epoch when resuming training (-1 = fresh).
    """
    # The multiplier is always 1, so the base learning rate is left untouched.
    return LambdaLR(optimizer , lambda _step : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup phase.

    Args:
        optimizer: the wrapped optimizer.
        num_warmup_steps: number of steps over which LR rises linearly 0 -> base LR.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` with the warmup-then-constant multiplier.
    """

    def lr_lambda(current_step: int):
        # The original obfuscated body referenced `current_step` while the
        # parameter was named differently, raising NameError on first step.
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise-constant learning-rate schedule driven by a rule string.

    Args:
        optimizer: the wrapped optimizer.
        step_rules: rules like "1:10,0.1:20,0.01" — multiplier 1.0 while
            step < 10, 0.1 while step < 20, and the trailing bare value
            (0.01) for every later step.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` applying the per-interval multiplier to the base LR.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            # Bug fix: the original never bound `sorted_steps` (or the `steps`
            # parameter), so the lookup below raised NameError.
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            # Past the last boundary: use the terminal multiplier.
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup to the base LR, then linear decay to 0.

    Args:
        optimizer: the wrapped optimizer.
        num_warmup_steps: steps for the 0 -> base-LR warmup ramp.
        num_training_steps: total steps; LR reaches 0 here.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` with the warmup/decay multiplier.
    """

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Clamp at 0 so steps beyond num_training_steps don't go negative.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles: float = 0.5, last_epoch: int = -1):
    """Linear warmup followed by a cosine decay of the learning rate.

    Args:
        optimizer: the wrapped optimizer.
        num_warmup_steps: steps for the linear warmup ramp.
        num_training_steps: total training steps.
        num_cycles: number of cosine waves; the default 0.5 decays from the
            base LR to 0 over the post-warmup span.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` with the warmup/cosine multiplier.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Fraction of the post-warmup phase completed, in [0, 1].
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles: int = 1, last_epoch: int = -1):
    """Linear warmup, then cosine decay with hard restarts.

    After warmup the LR follows `num_cycles` full cosine decays, jumping back
    to the base LR at the start of each cycle, and is 0 from
    `num_training_steps` onward.

    Args:
        optimizer: the wrapped optimizer.
        num_warmup_steps: steps for the linear warmup ramp.
        num_training_steps: total training steps.
        num_cycles: number of hard restarts.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        A `LambdaLR` with the warmup/restart-cosine multiplier.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # `% 1.0` restarts the cosine at the top of every cycle.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """Linear warmup, then polynomial decay from the base LR down to `lr_end`.

    Args:
        optimizer: the wrapped optimizer (its `defaults["lr"]` is the start LR).
        num_warmup_steps: steps for the linear warmup ramp.
        num_training_steps: total training steps; LR is `lr_end` afterwards.
        lr_end: final learning rate.
        power: polynomial power (1.0 gives a linear decay).
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: if `lr_end` is not strictly smaller than the initial LR.

    Returns:
        A `LambdaLR` with the warmup/polynomial-decay multiplier.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Dispatch table from schedule name to its factory; `get_scheduler` indexes
# this table by `TYPE_TO_SCHEDULER_FUNCTION[name]`, so it must be bound under
# that name (the obfuscated original bound it to a different identifier).
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name,
    optimizer,
    step_rules=None,
    num_warmup_steps=None,
    num_training_steps=None,
    num_cycles=1,
    power=1.0,
    last_epoch=-1,
):
    """Build a learning-rate schedule from its string/`SchedulerType` name.

    Args:
        name: schedule identifier, e.g. "linear" or `SchedulerType.COSINE`.
        optimizer: the wrapped optimizer.
        step_rules: rule string, only used by the piecewise-constant schedule.
        num_warmup_steps: warmup steps (required by all warmup schedules).
        num_training_steps: total steps (required by decaying schedules).
        num_cycles: cycle count for the hard-restart cosine schedule.
        power: polynomial power for the polynomial schedule.
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: when a schedule needs `num_warmup_steps` or
            `num_training_steps` and it was not provided.

    Returns:
        The configured `LambdaLR` schedule.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
def binary_insertion_sort(collection: list) -> list:
    """Sort ``collection`` in place with binary insertion sort and return it.

    For each element, a binary search finds its insertion point among the
    already-sorted prefix, then the elements to its right are shifted one slot.
    (The obfuscated original lost the ``collection[...]`` writes entirely, so
    it returned the input unsorted; it is also named here to match the
    ``binary_insertion_sort`` call in the ``__main__`` block.)

    >>> binary_insertion_sort([0, 4, 1234, 4, 1])
    [0, 1, 4, 4, 1234]
    """
    n = len(collection)
    for i in range(1, n):
        value_to_insert = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if value_to_insert < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift [low, i) one position right, then drop the value into place.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = value_to_insert
    return collection
if __name__ == "__main__":
    # Read a comma-separated list of integers, sort, and print. The obfuscated
    # original referenced `user_input`/`unsorted` without ever binding them,
    # which raised NameError when run as a script.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 462 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. (The obfuscated original bound the logger and the archive map
# to the same identifier, so the logger was clobbered, and annotated the line
# with `Union[str, Any]` — names this file never imports.)
logger = logging.get_logger(__name__)

# Map from model id to the hosted config URL for pretrained RoC-BERT checkpoints.
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class lowercase_ ( __snake_case ):
    """Configuration for a RoC-BERT model (``model_type`` 'roc_bert').

    Stores the transformer hyper-parameters plus the pronunciation/shape
    embedding settings specific to RoC-BERT.

    NOTE(review): the base class name `__snake_case` is obfuscation residue
    and is undefined; given the `PretrainedConfig` import at the top of this
    file it is presumably `PretrainedConfig` — confirm and fix.
    """

    # Identifier used by the Auto* machinery to recognize this config.
    model_type = 'roc_bert'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        """Initialize the config.

        The obfuscated original named every parameter `lowercase_`, which is a
        duplicate-argument SyntaxError; parameter names are restored from the
        attribute assignments in the body.
        """
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
"""simple docstring"""
# Module author credit. The previous `List[str]` annotation was both wrong
# (the value is a plain str) and referenced an unimported name — module-level
# annotations are evaluated, so it raised NameError on import.
a = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression via Dijkstra's
    two-stack algorithm.

    Args:
        equation: expression with single-digit operands, e.g.
            "(5 + ((4 * 2) * (2 + 3)))".

    Returns:
        The value left on the operand stack. Note "/" uses true division, so
        the result may be a float.

    The obfuscated original never bound `operators`, `operand_stack`,
    `operator_stack`, `opr`, or the operands, and was named differently from
    the `dijkstras_two_stack_algorithm` call in the `__main__` block.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands.
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators.
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on ")", pop one operator and two operands, apply, push.
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            # num2 came first in the input, so it is the left operand.
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5: the remaining operand is the result.
    return operand_stack.peek()
if __name__ == "__main__":
    # Demo expression; the obfuscated original bound it to `a` but then
    # printed `equation`, raising NameError when run as a script.
    equation = '(5 + ((4 * 2) * (2 + 3)))'
    # answer = 45
    print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 218 | from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """Return the photographic negative of ``img`` (modified in place).

    Each pixel value v becomes 255 - v, channel-wise. Assumes a 3-channel
    8-bit-range image array as produced by `imread` — TODO confirm dtype at
    call sites.

    The obfuscated original referenced `img` while its parameter had a
    different name (NameError) and lost the per-pixel write, so nothing was
    ever negated; it is named to match the `convert_to_negative` call below.
    """
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
    # read original image
    img = imread('image_data/lena.jpg', 1)
    # convert to its negative (in place; rebinding keeps the flow explicit)
    img = convert_to_negative(img)
    # show result image; the obfuscated original never bound `img` (NameError)
    # and carried trailing dataset junk that broke the file's syntax.
    imshow('negative of original image', img)
    waitKey(0)
    destroyAllWindows()
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCamelCase_ = 'Create a default config file for Accelerate with only a few flags set.'
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Create and save a minimal Accelerate cluster config.

    Args:
        mixed_precision: one of "no", "fp16", "bf16", "fp8".
        save_location: where to write the JSON config file.
        use_xpu: prefer Intel XPUs when available (CUDA still wins if present).

    Returns:
        The `Path` the config was written to, or ``False`` when a config
        already exists at ``save_location`` (nothing is overwritten).

    Raises:
        ValueError: for an unknown ``mixed_precision`` value.

    NOTE(review): the obfuscated original named all three parameters `__A`
    (duplicate-argument SyntaxError) and lost every write into the config
    dict; the keys below follow `ClusterConfig`'s fields — confirm against
    that dataclass.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"""Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.""" )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"""`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}""" )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        # CPU-only fallback.
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    """Register the `accelerate config default` subcommand.

    Args:
        parser: the subparsers action to attach the subcommand to.
        parents: parent parsers providing shared arguments.

    Returns:
        The configured subparser.

    NOTE(review): the obfuscated original named both parameters `__A`
    (duplicate-argument SyntaxError) and dropped several argument values; the
    `help`/`default`/`type` values below are reconstructed — confirm against
    the surrounding module (`lowerCamelCase_` is this file's description
    string, `default_json_config_file` the imported default path).
    """
    parser = parser.add_parser("default", parents=parents, help=lowerCamelCase_, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file", default=default_json_config_file, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ), dest="save_location", )
    parser.add_argument(
        "--mixed_precision", choices=["no", "fp16", "bf16"], type=str, help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.", default="no", )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    """Entry point for `accelerate config default`.

    Writes the basic config from the parsed CLI ``args`` and reports where it
    was saved. (The obfuscated original referenced `args` while its parameter
    was named `__A`, raising NameError when invoked.)
    """
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"""accelerate configuration saved at {config_file}""")
| 418 | import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
# Type aliases for feature dicts / raw model outputs. The obfuscated original
# bound all three constants to one identifier, clobbering the first two and
# leaving `PICO_TO_ANGSTROM` (used by the ProteinNet parser below) undefined.
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.

# ProteinNet stores coordinates in picometers; PDB output expects angstroms.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation in atom37 layout.

    Renamed/retyped from the obfuscated dump: every field was bound to one
    identifier with `42` as its annotation; the names below are recovered
    from the attribute accesses and `Protein(...)` keyword calls later in
    this file.
    """

    # Cartesian coordinates of atoms in angstroms.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def snake_case (__lowercase ) -> Protein:
    """Parse a ProteinNet-format string into a `Protein`.

    Splits the text on `[TAG]` headers, then fills the amino-acid sequence
    ([PRIMARY]), backbone N/CA/C coordinates ([TERTIARY], picometers scaled
    to angstroms), and the per-residue atom mask ([MASK]).

    NOTE(review): this block is mangled — most assignment targets were
    rewritten to `_snake_case`, so the names used later (`tags`, `groups`,
    `seq`, `tertiary`, `tertiary_np`, `mask`, `aatype`, `atom_positions`,
    `atom_mask`) are never bound, and the final `Protein(...)` call passes
    `__lowercase` for every field. Restore the original bindings (and the
    `residue_constants.atom_order[atom]`-indexed writes) before using it.
    """
    # Regex capturing section headers such "[PRIMARY]\n".
    _snake_case : str = r"(\[[A-Z]+\]\n)"
    _snake_case : List[str] = [tag.strip() for tag in re.split(__lowercase , __lowercase ) if len(__lowercase ) > 0]
    # Pair each header with the lines of its section body.
    _snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
    # Only backbone atoms are present in ProteinNet records.
    _snake_case : List[str] = ["N", "CA", "C"]
    _snake_case : Any = None
    _snake_case : Union[str, Any] = None
    _snake_case : Optional[int] = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            _snake_case : Tuple = g[1][0].strip()
            for i in range(len(__lowercase ) ):
                if seq[i] not in residue_constants.restypes:
                    # Unknown residues are mapped to 'X'.
                    _snake_case : Tuple = "X" # FIXME: strings are immutable
            _snake_case : int = np.array(
                [residue_constants.restype_order.get(__lowercase , residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            _snake_case : List[List[float]] = []
            for axis in range(3 ):
                tertiary.append(list(map(__lowercase , g[1][axis].split() ) ) )
            _snake_case : Dict = np.array(__lowercase )
            _snake_case : Dict = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
            for i, atom in enumerate(__lowercase ):
                _snake_case : List[Any] = np.transpose(tertiary_np[:, i::3] )
            # ProteinNet coordinates are in picometers; convert to angstroms.
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            # '-' means missing, '+' means present.
            _snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
            _snake_case : Any = np.zeros(
                (
                    len(__lowercase ),
                    residue_constants.atom_type_num,
                ) ).astype(np.floataa )
            for i, atom in enumerate(__lowercase ):
                _snake_case : Dict = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=__lowercase , atom_mask=__lowercase , aatype=__lowercase , residue_index=np.arange(len(__lowercase ) ) , b_factors=__lowercase , )
def snake_case (__lowercase , __lowercase = 0 ) -> List[str]:
'''simple docstring'''
_snake_case : List[str] = []
_snake_case : Optional[Any] = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
_snake_case : str = prot.parents
_snake_case : str = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
_snake_case : int = [p for i, p in zip(__lowercase , __lowercase ) if i == chain_id]
if parents is None or len(__lowercase ) == 0:
_snake_case : Optional[int] = ["N/A"]
pdb_headers.append(F"""PARENT {' '.join(__lowercase )}""" )
return pdb_headers
def snake_case (__lowercase , __lowercase ) -> str:
    """Re-emit ``pdb_str`` with REMARK/PARENT headers inserted per chain.

    Strips any existing PARENT/REMARK lines and writes a fresh PARENT line at
    the start of every chain (a chain boundary is a "TER" line not followed
    by "END").

    NOTE(review): mangled by obfuscation — `out_pdb_lines`, `lines`,
    `remark`, `parents_per_chain`, `parent_dict`, `max_idx`, `chain_counter`
    and the per-chain parent bindings are referenced but never assigned, and
    the final join is over `__lowercase`. Restore the original bindings
    before use.
    """
    _snake_case : List[str] = []

    _snake_case : Optional[int] = pdb_str.split("\n" )

    _snake_case : List[str] = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"""REMARK {remark}""" )

    _snake_case : List[List[str]]
    if prot.parents is not None and len(prot.parents ) > 0:
        _snake_case : str = []
        if prot.parents_chain_index is not None:
            # Group parent ids by their chain index.
            _snake_case : Dict[str, List[str]] = {}
            for p, i in zip(prot.parents , prot.parents_chain_index ):
                parent_dict.setdefault(str(__lowercase ) , [] )
                parent_dict[str(__lowercase )].append(__lowercase )

            _snake_case : Any = max([int(__lowercase ) for chain_idx in parent_dict] )
            for i in range(max_idx + 1 ):
                _snake_case : Tuple = parent_dict.get(str(__lowercase ) , ["N/A"] )
                parents_per_chain.append(__lowercase )
        else:
            parents_per_chain.append(list(prot.parents ) )
    else:
        _snake_case : List[str] = [["N/A"]]

    def make_parent_line(__lowercase ) -> str:
        # Format one PARENT header line from a list of parent ids.
        return f"""PARENT {' '.join(__lowercase )}"""

    out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )

    _snake_case : int = 0
    for i, l in enumerate(__lowercase ):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(__lowercase )
        if "TER" in l and "END" not in lines[i + 1]:
            # A new chain begins after this TER: emit its PARENT header.
            chain_counter += 1
            if not chain_counter >= len(__lowercase ):
                _snake_case : Tuple = parents_per_chain[chain_counter]
            else:
                _snake_case : str = ["N/A"]

            out_pdb_lines.append(make_parent_line(__lowercase ) )

    return "\n".join(__lowercase )
def snake_case (__lowercase ) -> str:
    """Serialize a `Protein` to PDB text (headers, ATOM records, TER, END).

    NOTE(review): mangled by obfuscation — nearly every assignment target was
    rewritten to `_snake_case`, so the names consumed below (`restypes`,
    `atom_types`, `pdb_lines`, `atom_mask`, `aatype`, `atom_positions`,
    `residue_index`, `b_factors`, `chain_index`, `atom_index`, `n`,
    `chain_tags`, `prev_chain_index`, `res_name_a`, `record_type`, `name`,
    `alt_loc`, `insertion_code`, `occupancy`, `element`, `charge`,
    `chain_tag`, `should_terminate`, `chain_end`, and the assembled ATOM/TER
    line) are never bound. Restore those bindings before use; the columnar
    f-string templates are kept verbatim since PDB is position-sensitive.
    """
    # 'X' is appended for unknown residue types.
    _snake_case : Optional[Any] = residue_constants.restypes + ["X"]

    def res_atoa(__lowercase ) -> str:
        # Map a residue index to its 3-letter code ("UNK" when out of range).
        return residue_constants.restype_atoa.get(restypes[r] , "UNK" )

    _snake_case : Optional[int] = residue_constants.atom_types

    _snake_case : List[str] = []

    _snake_case : Tuple = prot.atom_mask
    _snake_case : List[str] = prot.aatype
    _snake_case : int = prot.atom_positions
    _snake_case : int = prot.residue_index.astype(np.intaa )
    _snake_case : List[Any] = prot.b_factors
    _snake_case : str = prot.chain_index

    if np.any(aatype > residue_constants.restype_num ):
        raise ValueError("Invalid aatypes." )

    _snake_case : Union[str, Any] = get_pdb_headers(__lowercase )
    if len(__lowercase ) > 0:
        pdb_lines.extend(__lowercase )

    _snake_case : Optional[Any] = aatype.shape[0]
    _snake_case : str = 1
    _snake_case : Tuple = 0
    _snake_case : int = string.ascii_uppercase
    _snake_case : Optional[Any] = None
    # Add all atom sites.
    for i in range(__lowercase ):
        _snake_case : Dict = res_atoa(aatype[i] )
        for atom_name, pos, mask, b_factor in zip(__lowercase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
            if mask < 0.5:
                continue

            _snake_case : List[Any] = "ATOM"
            _snake_case : Union[str, Any] = atom_name if len(__lowercase ) == 4 else f""" {atom_name}"""
            _snake_case : str = ""
            _snake_case : str = ""
            _snake_case : Any = 1.00
            _snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
            _snake_case : Dict = ""
            _snake_case : Any = "A"
            if chain_index is not None:
                _snake_case : List[Any] = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            _snake_case : Optional[int] = (
                f"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
                f"""{res_name_a:>3} {chain_tag:>1}"""
                f"""{residue_index[i]:>4}{insertion_code:>1} """
                f"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
                f"""{occupancy:>6.2f}{b_factor:>6.2f} """
                f"""{element:>2}{charge:>2}"""
            )
            pdb_lines.append(__lowercase )
            atom_index += 1

        _snake_case : Dict = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                _snake_case : Optional[int] = True
                _snake_case : Union[str, Any] = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            _snake_case : List[str] = "TER"
            _snake_case : str = (
                f"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
            )
            pdb_lines.append(__lowercase )
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(__lowercase , __lowercase ) )

    pdb_lines.append("END" )
    pdb_lines.append("" )
    return "\n".join(__lowercase )
def ideal_atom_mask(prot) -> np.ndarray:
    """Return the ideal atom mask for ``prot``.

    Looks up, per residue type, which atom37 slots exist for that residue
    (independent of whether the atoms were actually resolved). The
    obfuscated original referenced `prot` while the parameter had a
    different name, raising NameError on every call.
    """
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features,
    result,
    b_factors=None,
    chain_index=None,
    remark=None,
    parents=None,
    parents_chain_index=None,
):
    """Assemble a `Protein` from model ``features`` and prediction ``result``.

    Args:
        features: mapping with "aatype" and "residue_index" arrays.
        result: mapping with "final_atom_positions" and "final_atom_mask".
        b_factors: optional per-atom B-factors (zeros when omitted).
        chain_index, remark, parents, parents_chain_index: forwarded metadata.

    Returns:
        The populated `Protein` (residue indices are shifted to 1-based).

    The obfuscated original named every parameter `__lowercase`, a
    duplicate-argument SyntaxError; names are restored from the keyword
    arguments of the `Protein(...)` call.
    """
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Solve the 0/1 knapsack problem by plain recursion.

    Args:
        weights: weight of each item.
        values: value of each item.
        number_of_items: total number of items.
        max_weight: remaining capacity.
        index: item currently being considered.

    Returns:
        Maximum total value achievable from items[index:] within max_weight.

    The obfuscated original named all five parameters identically (a
    duplicate-argument SyntaxError) and recursed on the name `knapsack`,
    which did not exist under its mangled definition name.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    """
    if index == number_of_items:
        return 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item if it still fits.
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
    # Run any doctest examples defined in this module's docstrings.
    import doctest

    doctest.testmod()
| 593 | from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class lowercase_ ( __snake_case ):
    """SAM processor: wraps a `SamImageProcessor` and prepares images plus
    prompt inputs (points, labels, boxes) for the SAM model.

    NOTE(review): heavily mangled by obfuscation. All private helper methods
    below share the name `UpperCamelCase` (so only the last definition would
    survive), while the code calls `self._check_and_preprocess_points`,
    `self._normalize_and_convert`, `self._pad_points_and_labels`, and
    `self._normalize_coordinates` — restore those names. `__call__` and the
    helpers also declare several parameters all named `lowercase_`, which is
    a duplicate-argument SyntaxError, and most local/attribute assignment
    targets (e.g. `self.point_pad_value`, `self.target_size`,
    `encoding_image_processor`, `original_sizes`) were rewritten to
    `_snake_case` and are never bound. The two `_lowerCamelCase` class
    attributes are presumably `attributes` and `image_processor_class`.
    """
    _lowerCamelCase = ['image_processor']
    _lowerCamelCase = 'SamImageProcessor'
    def __init__( self , lowercase_ ):
        """Store the image processor and derive padding/resize settings.

        NOTE(review): the three assignments below lost their targets; usage
        elsewhere implies `self.current_processor`, `self.point_pad_value`,
        and `self.target_size` respectively.
        """
        super().__init__(lowercase_ )
        _snake_case : Optional[Any] = self.image_processor
        _snake_case : Tuple = -10
        _snake_case : str = self.image_processor.size["longest_edge"]
    def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = None , **lowercase_ , ):
        """Preprocess images, then validate and normalize prompt inputs.

        NOTE(review): duplicate parameter names (SyntaxError); presumably
        (images, input_points, input_labels, input_boxes, return_tensors,
        **kwargs) — confirm against the image-processor call below.
        """
        _snake_case : List[Any] = self.image_processor(
            lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
        # pop arguments that are not used in the foward but used nevertheless
        _snake_case : Any = encoding_image_processor["original_sizes"]
        if hasattr(lowercase_ , "numpy" ): # Checks if Torch or TF tensor
            _snake_case : int = original_sizes.numpy()
        _snake_case ,_snake_case ,_snake_case : Union[str, Any] = self._check_and_preprocess_points(
            input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , )
        _snake_case : Dict = self._normalize_and_convert(
            lowercase_ , lowercase_ , input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , return_tensors=lowercase_ , )
        return encoding_image_processor
    # NOTE(review): presumably `_normalize_and_convert` — rescales prompt
    # coordinates to the resized image frame and converts them to pt/tf
    # tensors with a batch dimension.
    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="pt" , ):
        if input_points is not None:
            if len(lowercase_ ) != len(lowercase_ ):
                # One original size for all prompts.
                _snake_case : int = [
                    self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] ) for point in input_points
                ]
            else:
                _snake_case : Dict = [
                    self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ )
                    for point, original_size in zip(lowercase_ , lowercase_ )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    _snake_case ,_snake_case : int = self._pad_points_and_labels(lowercase_ , lowercase_ )
            _snake_case : Any = np.array(lowercase_ )
        if input_labels is not None:
            _snake_case : Optional[Any] = np.array(lowercase_ )
        if input_boxes is not None:
            if len(lowercase_ ) != len(lowercase_ ):
                _snake_case : Optional[Any] = [
                    self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] , is_bounding_box=lowercase_ )
                    for box in input_boxes
                ]
            else:
                _snake_case : List[str] = [
                    self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ , is_bounding_box=lowercase_ )
                    for box, original_size in zip(lowercase_ , lowercase_ )
                ]
            _snake_case : Tuple = np.array(lowercase_ )
        if input_boxes is not None:
            if return_tensors == "pt":
                _snake_case : List[str] = torch.from_numpy(lowercase_ )
                # boxes batch size of 1 by default
                _snake_case : Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                _snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
                # boxes batch size of 1 by default
                _snake_case : Optional[int] = tf.expand_dims(lowercase_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                _snake_case : Tuple = torch.from_numpy(lowercase_ )
                # point batch size of 1 by default
                _snake_case : int = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                _snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
                # point batch size of 1 by default
                _snake_case : Tuple = tf.expand_dims(lowercase_ , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                _snake_case : Dict = torch.from_numpy(lowercase_ )
                # point batch size of 1 by default
                _snake_case : str = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                _snake_case : Optional[Any] = tf.convert_to_tensor(lowercase_ )
                # point batch size of 1 by default
                _snake_case : List[Any] = tf.expand_dims(lowercase_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels} )
        return encoding_image_processor
    # NOTE(review): presumably `_pad_points_and_labels` — pads ragged point
    # sets to a common length using `self.point_pad_value`.
    def UpperCamelCase ( self , lowercase_ , lowercase_ ):
        _snake_case : List[Any] = max([point.shape[0] for point in input_points] )
        _snake_case : List[str] = []
        for i, point in enumerate(lowercase_ ):
            if point.shape[0] != expected_nb_points:
                # Pad missing rows with the sentinel pad value; extend labels too.
                _snake_case : Optional[Any] = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                _snake_case : Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(lowercase_ )
        _snake_case : Optional[Any] = processed_input_points
        return input_points, input_labels
    # NOTE(review): presumably `_normalize_coordinates` — rescales (x, y)
    # coordinates from the original image size to the resized frame; boxes
    # are reshaped (-1, 2, 2) for the scaling and back to (-1, 4).
    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False ):
        _snake_case ,_snake_case : Optional[int] = original_size
        _snake_case ,_snake_case : List[str] = self.image_processor._get_preprocess_shape(lowercase_ , longest_edge=lowercase_ )
        _snake_case : Optional[Any] = deepcopy(lowercase_ ).astype(lowercase_ )
        if is_bounding_box:
            _snake_case : str = coords.reshape(-1 , 2 , 2 )
        _snake_case : Optional[Any] = coords[..., 0] * (new_w / old_w)
        _snake_case : Dict = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            _snake_case : Optional[Any] = coords.reshape(-1 , 4 )
        return coords
    # NOTE(review): presumably `_check_and_preprocess_points` — validates the
    # raw prompt inputs, converting tensors to nested lists of numpy arrays.
    def UpperCamelCase ( self , lowercase_=None , lowercase_=None , lowercase_=None , ):
        if input_points is not None:
            if hasattr(lowercase_ , "numpy" ): # Checks for TF or Torch tensor
                _snake_case : Union[str, Any] = input_points.numpy().tolist()
            if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_points[0] , lowercase_ ):
                raise ValueError("Input points must be a list of list of floating points." )
            _snake_case : Any = [np.array(lowercase_ ) for input_point in input_points]
        else:
            _snake_case : Optional[int] = None
        if input_labels is not None:
            if hasattr(lowercase_ , "numpy" ):
                _snake_case : Tuple = input_labels.numpy().tolist()
            if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_labels[0] , lowercase_ ):
                raise ValueError("Input labels must be a list of list integers." )
            _snake_case : Tuple = [np.array(lowercase_ ) for label in input_labels]
        else:
            _snake_case : Optional[Any] = None
        if input_boxes is not None:
            if hasattr(lowercase_ , "numpy" ):
                _snake_case : List[str] = input_boxes.numpy().tolist()
            if (
                not isinstance(lowercase_ , lowercase_ )
                or not isinstance(input_boxes[0] , lowercase_ )
                or not isinstance(input_boxes[0][0] , lowercase_ )
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points." )
            _snake_case : List[Any] = [np.array(lowercase_ ).astype(np.floataa ) for box in input_boxes]
        else:
            _snake_case : Optional[int] = None
        return input_points, input_labels, input_boxes
    @property
    def UpperCamelCase ( self ):
        # De-duplicated model input names from the wrapped image processor.
        _snake_case : Union[str, Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(lowercase_ ) )
    def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
        # Thin delegation to the image processor's mask post-processing.
        return self.image_processor.post_process_masks(*lowercase_ , **lowercase_ )
import numpy
class snake_case :
'''simple docstring'''
def __init__( self : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
SCREAMING_SNAKE_CASE_ = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
SCREAMING_SNAKE_CASE_ = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
SCREAMING_SNAKE_CASE_ = numpy.random.rand(3 , 1 )
# Real output values provided.
SCREAMING_SNAKE_CASE_ = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
SCREAMING_SNAKE_CASE_ = numpy.zeros(output_array.shape )
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
SCREAMING_SNAKE_CASE_ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
SCREAMING_SNAKE_CASE_ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def _lowercase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
SCREAMING_SNAKE_CASE_ = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
SCREAMING_SNAKE_CASE_ = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def _lowercase ( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int ) -> int:
"""simple docstring"""
for iteration in range(1 , iterations + 1 ):
SCREAMING_SNAKE_CASE_ = self.feedforward()
self.back_propagation()
if give_loss:
SCREAMING_SNAKE_CASE_ = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F'''Iteration {iteration} Loss: {loss}''' )
def _lowercase ( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = input_arr
SCREAMING_SNAKE_CASE_ = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
SCREAMING_SNAKE_CASE_ = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
SCREAMING_SNAKE_CASE_ = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def UpperCAmelCase ( UpperCAmelCase )-> numpy.ndarray:
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def UpperCAmelCase ( UpperCAmelCase )-> numpy.ndarray:
'''simple docstring'''
return (value) * (1 - (value))
def UpperCAmelCase ( )-> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) ,dtype=numpy.floataa ,)
# True output values for the given input values.
SCREAMING_SNAKE_CASE_ = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) ,dtype=numpy.floataa )
# Calling neural network class.
SCREAMING_SNAKE_CASE_ = TwoHiddenLayerNeuralNetwork(
input_array=__lowercase ,output_array=__lowercase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=__lowercase ,iterations=10 ,give_loss=__lowercase )
return neural_network.predict(numpy.array(([1, 1, 1]) ,dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 393 | def snake_case (__lowercase ) -> int:
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
_snake_case : Union[str, Any] = grid[0]
for row_n in range(1 , len(__lowercase ) ):
_snake_case : Union[str, Any] = grid[row_n]
_snake_case : List[Any] = fill_row(__lowercase , __lowercase )
_snake_case : List[Any] = grid[row_n]
return grid[-1][-1]
def snake_case (__lowercase , __lowercase ) -> list:
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(__lowercase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | 0 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_A: Any = random.Random()
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None )-> str:
if rng is None:
__UpperCAmelCase = global_rng
__UpperCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self , __A , __A=7 , __A=400 , __A=2_000 , __A=10 , __A=160 , __A=8 , __A=0.0 , __A=4_000 , __A=False , __A=True , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = min_seq_length
__UpperCAmelCase = max_seq_length
__UpperCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCAmelCase = padding_value
__UpperCAmelCase = sampling_rate
__UpperCAmelCase = return_attention_mask
__UpperCAmelCase = do_normalize
__UpperCAmelCase = feature_size
__UpperCAmelCase = chunk_length
__UpperCAmelCase = hop_length
def __lowerCamelCase ( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __lowerCamelCase ( self , __A=False , __A=False ):
def _flatten(__A ):
return list(itertools.chain(*lowercase_ ) )
if equal_length:
__UpperCAmelCase = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCAmelCase = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCAmelCase = [np.asarray(lowercase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( __snake_case , unittest.TestCase ):
_A : Dict = WhisperFeatureExtractor if is_speech_available() else None
def __lowerCamelCase ( self ):
__UpperCAmelCase = WhisperFeatureExtractionTester(self )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase = feat_extract_first.save_pretrained(lowercase_ )[0]
check_json_file_has_correct_format(lowercase_ )
__UpperCAmelCase = self.feature_extraction_class.from_pretrained(lowercase_ )
__UpperCAmelCase = feat_extract_first.to_dict()
__UpperCAmelCase = feat_extract_second.to_dict()
__UpperCAmelCase = feat_extract_first.mel_filters
__UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
self.assertEqual(lowercase_ , lowercase_ )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase = os.path.join(lowercase_ , 'feat_extract.json' )
feat_extract_first.to_json_file(lowercase_ )
__UpperCAmelCase = self.feature_extraction_class.from_json_file(lowercase_ )
__UpperCAmelCase = feat_extract_first.to_dict()
__UpperCAmelCase = feat_extract_second.to_dict()
__UpperCAmelCase = feat_extract_first.mel_filters
__UpperCAmelCase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowercase_ , lowercase_ ) )
self.assertEqual(lowercase_ , lowercase_ )
def __lowerCamelCase ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCAmelCase = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__UpperCAmelCase = [np.asarray(lowercase_ ) for speech_input in speech_inputs]
# Test feature size
__UpperCAmelCase = feature_extractor(lowercase_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__UpperCAmelCase = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__UpperCAmelCase = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
# Test batched
__UpperCAmelCase = feature_extractor(lowercase_ , return_tensors='np' ).input_features
__UpperCAmelCase = feature_extractor(lowercase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCAmelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__UpperCAmelCase = np.asarray(lowercase_ )
__UpperCAmelCase = feature_extractor(lowercase_ , return_tensors='np' ).input_features
__UpperCAmelCase = feature_extractor(lowercase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
# Test truncation required
__UpperCAmelCase = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__UpperCAmelCase = [np.asarray(lowercase_ ) for speech_input in speech_inputs]
__UpperCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
__UpperCAmelCase = [np.asarray(lowercase_ ) for speech_input in speech_inputs_truncated]
__UpperCAmelCase = feature_extractor(lowercase_ , return_tensors='np' ).input_features
__UpperCAmelCase = feature_extractor(lowercase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )
def __lowerCamelCase ( self ):
import torch
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa )
__UpperCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCAmelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__UpperCAmelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __lowerCamelCase ( self , __A ):
__UpperCAmelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__UpperCAmelCase = ds.sort('id' ).select(range(lowercase_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def __lowerCamelCase ( self ):
# fmt: off
__UpperCAmelCase = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
__UpperCAmelCase = self._load_datasamples(1 )
__UpperCAmelCase = WhisperFeatureExtractor()
__UpperCAmelCase = feature_extractor(lowercase_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowercase_ , atol=1E-4 ) )
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCAmelCase = self._load_datasamples(1 )[0]
__UpperCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
__UpperCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowercase_ )[0]
self.assertTrue(np.all(np.mean(lowercase_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowercase_ ) - 1 ) < 1E-3 ) )
| 126 | import random
def snake_case (__lowercase , __lowercase ) -> tuple:
'''simple docstring'''
_snake_case ,_snake_case ,_snake_case : List[Any] = [], [], []
for element in data:
if element < pivot:
less.append(__lowercase )
elif element > pivot:
greater.append(__lowercase )
else:
equal.append(__lowercase )
return less, equal, greater
def snake_case (__lowercase , __lowercase ) -> List[Any]:
'''simple docstring'''
if index >= len(__lowercase ) or index < 0:
return None
_snake_case : Any = items[random.randint(0 , len(__lowercase ) - 1 )]
_snake_case : Tuple = 0
_snake_case ,_snake_case ,_snake_case : Tuple = _partition(__lowercase , __lowercase )
_snake_case : Tuple = len(__lowercase )
_snake_case : List[str] = len(__lowercase )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(__lowercase , __lowercase )
# must be in larger
else:
return quick_select(__lowercase , index - (m + count) ) | 670 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __magic_name__ ( __snake_case ):
_SCREAMING_SNAKE_CASE : int = 'sew-d'
def __init__( self : Optional[int] , snake_case_ : Tuple=32 , snake_case_ : List[Any]=768 , snake_case_ : List[str]=12 , snake_case_ : List[Any]=12 , snake_case_ : Tuple=3072 , snake_case_ : Dict=2 , snake_case_ : Dict=512 , snake_case_ : Optional[int]=256 , snake_case_ : Optional[Any]=True , snake_case_ : Any=True , snake_case_ : Optional[int]=("p2c", "c2p") , snake_case_ : Optional[int]="layer_norm" , snake_case_ : Any="gelu_python" , snake_case_ : Dict=0.1 , snake_case_ : int=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : int=0.0 , snake_case_ : str=0.1 , snake_case_ : Optional[int]=0.02 , snake_case_ : Optional[int]=1e-7 , snake_case_ : Dict=1e-5 , snake_case_ : List[str]="group" , snake_case_ : Union[str, Any]="gelu" , snake_case_ : int=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , snake_case_ : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , snake_case_ : Tuple=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , snake_case_ : Dict=False , snake_case_ : Optional[int]=128 , snake_case_ : int=16 , snake_case_ : List[Any]=True , snake_case_ : Optional[Any]=0.05 , snake_case_ : Optional[int]=10 , snake_case_ : Any=2 , snake_case_ : str=0.0 , snake_case_ : Optional[Any]=10 , snake_case_ : Any=0 , snake_case_ : Tuple="mean" , snake_case_ : Union[str, Any]=False , snake_case_ : Union[str, Any]=False , snake_case_ : int=256 , snake_case_ : str=0 , snake_case_ : int=1 , snake_case_ : List[Any]=2 , **snake_case_ : Optional[Any] , ):
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
__snake_case = hidden_size
__snake_case = feat_extract_norm
__snake_case = feat_extract_activation
__snake_case = list(lowercase_ )
__snake_case = list(lowercase_ )
__snake_case = list(lowercase_ )
__snake_case = conv_bias
__snake_case = num_conv_pos_embeddings
__snake_case = num_conv_pos_embedding_groups
__snake_case = len(self.conv_dim )
__snake_case = num_hidden_layers
__snake_case = intermediate_size
__snake_case = squeeze_factor
__snake_case = max_position_embeddings
__snake_case = position_buckets
__snake_case = share_att_key
__snake_case = relative_attention
__snake_case = norm_rel_ebd
__snake_case = list(lowercase_ )
__snake_case = hidden_act
__snake_case = num_attention_heads
__snake_case = hidden_dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = feat_proj_dropout
__snake_case = final_dropout
__snake_case = layer_norm_eps
__snake_case = feature_layer_norm_eps
__snake_case = initializer_range
__snake_case = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case = apply_spec_augment
__snake_case = mask_time_prob
__snake_case = mask_time_length
__snake_case = mask_time_min_masks
__snake_case = mask_feature_prob
__snake_case = mask_feature_length
__snake_case = mask_feature_min_masks
# ctc loss
__snake_case = ctc_loss_reduction
__snake_case = ctc_zero_infinity
# sequence classification
__snake_case = use_weighted_layer_sum
__snake_case = classifier_proj_size
@property
def lowerCAmelCase ( self : Union[str, Any] ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 163 | from math import pow, sqrt
def snake_case (*__lowercase ) -> bool:
'''simple docstring'''
_snake_case : str = len(__lowercase ) > 0 and all(value > 0.0 for value in values )
return result
def snake_case (__lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase )
else ValueError("Input Error: Molar mass values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
)
def snake_case (__lowercase , __lowercase , __lowercase ) -> float | ValueError:
'''simple docstring'''
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__lowercase , __lowercase , __lowercase )
else ValueError(
"Input Error: Molar mass and effusion rate values must greater than 0." )
) | 670 | 0 |
import torch
from transformers import AutoModel
class _UpperCAmelCase ( torch.nn.Module ):
def __init__( self , a__="sayef/fsner-bert-base-uncased" ):
super(lowercase_ , self ).__init__()
A_ : List[Any] = AutoModel.from_pretrained(lowercase_ , return_dict=lowercase_ )
A_ : Optional[int] = torch.nn.CosineSimilarity(3 , 1E-08 )
A_ : Tuple = torch.nn.Softmax(dim=1 )
def _lowerCamelCase ( self , **a__ ):
return self.bert(**lowercase_ ).last_hidden_state
def _lowerCamelCase ( self , a__ ):
return token_embeddings.sum(2 , keepdim=lowercase_ )
def _lowerCamelCase ( self , a__ , a__ , a__=1 ):
return self.softmax(T * self.cos(lowercase_ , lowercase_ ) )
def _lowerCamelCase ( self , a__ , a__ ):
A_ : Union[str, Any] = W_supports["sizes"].tolist()
A_ : Optional[int] = W_supports["start_token_id"].item()
A_ : int = W_supports["end_token_id"].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
A_ : int = self.BERT(**lowercase_ )
A_ : int = self.BERT(**lowercase_ )
A_ : Any = None
A_ : List[Any] = None
A_ : Tuple = W_supports["input_ids"] == start_token_id
A_ : Optional[int] = W_supports["input_ids"] == end_token_id
for i, size in enumerate(lowercase_ ):
if i == 0:
A_ : str = 0
else:
A_ : Optional[int] = support_sizes[i - 1]
A_ : Optional[int] = S[s : s + size][start_token_masks[s : s + size]]
A_ : Dict = S[s : s + size][end_token_masks[s : s + size]]
A_ : Optional[int] = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
A_ : List[str] = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
A_ : List[Any] = torch.vstack((p_starts, p_start) )
A_ : List[Any] = torch.vstack((p_ends, p_end) )
else:
A_ : Optional[int] = p_start
A_ : Optional[Any] = p_end
return p_starts, p_ends
| 569 | import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
def __init__( self , *lowercase_ , **lowercase_ ):
warnings.warn(
"The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use LayoutLMv2ImageProcessor instead." , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ ) | 670 | 0 |
from manim import *
class lowercase_ ( __snake_case):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
a_ = Rectangle(height=0.5 , width=0.5 )
a_ = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
a_ = [mem.copy() for i in range(6 )]
a_ = [mem.copy() for i in range(6 )]
a_ = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
a_ = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
a_ = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
a_ = Text("""CPU""" , font_size=24 )
a_ = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
a_ = [mem.copy() for i in range(1 )]
a_ = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
a_ = Text("""GPU""" , font_size=24 )
a_ = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
gpu.align_to(lowercase_ , lowercase_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowercase_ )
a_ = [mem.copy() for i in range(6 )]
a_ = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
a_ = Text("""Model""" , font_size=24 )
a_ = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) , )
a_ = MarkupText(
f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM." , font_size=24 , )
a_ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a_ = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ , run_time=2.5 ) , Write(lowercase_ ) , Write(lowercase_ ) )
self.add(lowercase_ )
a_ = []
a_ = []
a_ = []
for i, rect in enumerate(lowercase_ ):
a_ = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
cpu_target.move_to(lowercase_ )
cpu_target.generate_target()
a_ = 0.4_6 / 4
a_ = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowercase_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowercase_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowercase_ , buff=0.0 )
cpu_targs.append(lowercase_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowercase_ ) )
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
self.play(*lowercase_ )
self.play(*lowercase_ )
self.wait() | 483 | from __future__ import annotations
from typing import TypedDict
class lowercase_ ( __snake_case ):
_lowerCamelCase = 42
_lowerCamelCase = 42
def snake_case (__lowercase ) -> list[str]:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter s type must be str." )
return [s[i:] + s[:i] for i in range(len(__lowercase ) )]
def snake_case (__lowercase ) -> BWTTransformDict:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter s type must be str." )
if not s:
raise ValueError("The parameter s must not be empty." )
_snake_case : List[str] = all_rotations(__lowercase )
rotations.sort() # sort the list of rotations in alphabetically order
# make a string composed of the last char of each rotation
_snake_case : BWTTransformDict = {
"bwt_string": "".join([word[-1] for word in rotations] ),
"idx_original_string": rotations.index(__lowercase ),
}
return response
def snake_case (__lowercase , __lowercase ) -> str:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
raise TypeError("The parameter bwt_string type must be str." )
if not bwt_string:
raise ValueError("The parameter bwt_string must not be empty." )
try:
_snake_case : Union[str, Any] = int(__lowercase )
except ValueError:
raise TypeError(
"The parameter idx_original_string type must be int or passive"
" of cast to int." )
if idx_original_string < 0:
raise ValueError("The parameter idx_original_string must not be lower than 0." )
if idx_original_string >= len(__lowercase ):
raise ValueError(
"The parameter idx_original_string must be lower than" " len(bwt_string)." )
_snake_case : Optional[Any] = [""] * len(__lowercase )
for _ in range(len(__lowercase ) ):
for i in range(len(__lowercase ) ):
_snake_case : Tuple = bwt_string[i] + ordered_rotations[i]
ordered_rotations.sort()
return ordered_rotations[idx_original_string]
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[Any] = 'Provide a string that I will generate its BWT transform: '
__SCREAMING_SNAKE_CASE : Optional[Any] = input(entry_msg).strip()
__SCREAMING_SNAKE_CASE : int = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result['bwt_string']}\''''
)
__SCREAMING_SNAKE_CASE : List[str] = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
F'''we get original string \'{original_string}\''''
) | 670 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase ( __snake_case ):
"""simple docstring"""
A__ : int = ['image_processor', 'tokenizer']
A__ : int = 'Pix2StructImageProcessor'
A__ : Dict = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self , _snake_case , _snake_case ) -> Optional[Any]:
_UpperCamelCase : Tuple = False
super().__init__(lowercase_ , lowercase_ )
def __call__( self , _snake_case=None , _snake_case = None , _snake_case = True , _snake_case = False , _snake_case = None , _snake_case = None , _snake_case = 2048 , _snake_case = 0 , _snake_case = None , _snake_case = None , _snake_case = False , _snake_case = False , _snake_case = False , _snake_case = False , _snake_case = False , _snake_case = True , _snake_case = None , **_snake_case , ) -> List[Any]:
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None and not self.image_processor.is_vqa:
_UpperCamelCase : List[Any] = self.tokenizer
_UpperCamelCase : int = self.tokenizer(
text=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_token_type_ids=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
_UpperCamelCase : Dict = self.image_processor(
lowercase_ , return_tensors=lowercase_ , max_patches=lowercase_ , **lowercase_ )
else:
# add pixel_values and bbox
_UpperCamelCase : Optional[Any] = self.image_processor(
lowercase_ , return_tensors=lowercase_ , max_patches=lowercase_ , header_text=lowercase_ , **lowercase_ )
if text is not None and not self.image_processor.is_vqa:
_UpperCamelCase : Union[str, Any] = self.tokenizer(
text=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_token_type_ids=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
if "attention_mask" in text_encoding:
_UpperCamelCase : str = text_encoding.pop('''attention_mask''' )
if "input_ids" in text_encoding:
_UpperCamelCase : List[str] = text_encoding.pop('''input_ids''' )
else:
_UpperCamelCase : Dict = None
if text_encoding is not None:
encoding_image_processor.update(lowercase_ )
return encoding_image_processor
def _lowercase ( self , *_snake_case , **_snake_case ) -> Any:
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def _lowercase ( self , *_snake_case , **_snake_case ) -> Tuple:
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def _lowercase ( self ) -> Dict:
_UpperCamelCase : Any = self.tokenizer.model_input_names
_UpperCamelCase : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401

# Emit a one-time deprecation warning when this legacy module is imported.
deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
"""simple docstring"""
A : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
A : int = [{'type': 'code', 'content': INSTALL_CONTENT}]
A : List[str] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    """Builds a tiny LED config and inputs for the TF model tests below."""

    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        """Return a small LED config plus matching encoder/decoder inputs."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        # Mark the last (eos) position as globally attended.
        global_attention_mask = tf.concat(
            [tf.zeros_like(inputs_dict["attention_mask"])[:, :-1], tf.ones_like(inputs_dict["attention_mask"])[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Verify decoder outputs with cached past match outputs without cache."""
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """Build the standard LED input dict, deriving masks from pad tokens when not given."""
    if attention_mask is None:
        # Attend to every non-pad token.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        # First decoder position is always attended; the rest follow the pad mask.
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model-tester suite specialized for LED."""

    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        # Make the first `num_global_attn_indices` positions global-attention tokens.
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"],
        )
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: Head-masking not yet implemented
        pass
def _long_tensor(tok_lst):
    """Wrap a nested list of token ids in an int32 TF constant."""
    return tf.constant(tok_lst, dtype=tf.int32)


# Absolute tolerance used by the slow integration tests below.
__SCREAMING_SNAKE_CASE : List[Any] = 1E-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the pretrained `allenai/led-base-16384` checkpoint."""

    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    # Mappings the pipeline test mixin uses to enumerate checkpoint/model pairs.
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
def tearDown(self):
    """Release GPU memory between tests so later pipelines don't OOM."""
    super().tearDown()
    # clean-up as much as possible GPU memory occupied by PyTorch
    gc.collect()
    if is_torch_available():
        import torch

        torch.cuda.empty_cache()
@require_tf
def test_small_model_tf(self):
    """Tiny TF checkpoint: pin the exact top-k fill-mask predictions."""
    unmasker = pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', top_k=2, framework='''tf''')
    outputs = unmasker('''My name is <mask>''')
    self.assertEqual(
        nested_simplify(outputs, decimals=6),
        [
            {'''sequence''': '''My name is grouped''', '''score''': 2.1e-05, '''token''': 3_8_0_1_5, '''token_str''': ''' grouped'''},
            {'''sequence''': '''My name is accuser''', '''score''': 2.1e-05, '''token''': 2_5_5_0_6, '''token_str''': ''' accuser'''},
        ],
    )
    outputs = unmasker('''The largest city in France is <mask>''')
    self.assertEqual(
        nested_simplify(outputs, decimals=6),
        [
            {'''sequence''': '''The largest city in France is grouped''', '''score''': 2.1e-05, '''token''': 3_8_0_1_5, '''token_str''': ''' grouped'''},
            {'''sequence''': '''The largest city in France is accuser''', '''score''': 2.1e-05, '''token''': 2_5_5_0_6, '''token_str''': ''' accuser'''},
        ],
    )
    outputs = unmasker('''My name is <mask>''', targets=[''' Patrick''', ''' Clara''', ''' Teven'''], top_k=3)
    self.assertEqual(
        nested_simplify(outputs, decimals=6),
        [
            {'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
            {'''sequence''': '''My name is Patrick''', '''score''': 2e-05, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
            {'''sequence''': '''My name is Te''', '''score''': 1.9e-05, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
        ],
    )
@require_torch
def test_small_model_pt(self):
    """Tiny PyTorch checkpoint: pin the exact top-k fill-mask predictions, incl. multi-mask input."""
    unmasker = pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', top_k=2, framework='''pt''')
    outputs = unmasker('''My name is <mask>''')
    self.assertEqual(
        nested_simplify(outputs, decimals=6),
        [
            {'''sequence''': '''My name is Maul''', '''score''': 2.2e-05, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul'''},
            {'''sequence''': '''My name isELS''', '''score''': 2.2e-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''},
        ],
    )
    outputs = unmasker('''The largest city in France is <mask>''')
    self.assertEqual(
        nested_simplify(outputs, decimals=6),
        [
            {'''sequence''': '''The largest city in France is Maul''', '''score''': 2.2e-05, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul'''},
            {'''sequence''': '''The largest city in France isELS''', '''score''': 2.2e-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS'''},
        ],
    )
    outputs = unmasker('''My name is <mask>''', targets=[''' Patrick''', ''' Clara''', ''' Teven'''], top_k=3)
    self.assertEqual(
        nested_simplify(outputs, decimals=6),
        [
            {'''sequence''': '''My name is Patrick''', '''score''': 2.1e-05, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
            {'''sequence''': '''My name is Te''', '''score''': 2e-05, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
            {'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
        ],
    )
    # Two masks in one input -> one result list per mask position.
    outputs = unmasker('''My name is <mask> <mask>''', top_k=2)
    self.assertEqual(
        nested_simplify(outputs, decimals=6),
        [
            [
                {'''score''': 2.2e-05, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is Maul<mask></s>'''},
                {'''score''': 2.2e-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
            ],
            [
                {'''score''': 2.2e-05, '''token''': 3_5_6_7_6, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is<mask> Maul</s>'''},
                {'''score''': 2.2e-05, '''token''': 1_6_4_1_6, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
            ],
        ],
    )
@require_torch_gpu
def test_fp16_casting(self):
    """Running a half-precision model must still produce float32-postprocessed output."""
    pipe = pipeline('''fill-mask''', model='''hf-internal-testing/tiny-random-distilbert''', device=0, framework='''pt''')
    # convert model to fp16
    pipe.model.half()
    response = pipe('''Paris is the [MASK] of France.''')
    # We actually don't care about the result, we just want to make sure
    # it works, meaning the float16 tensor got casted back to float32
    # for postprocessing.
    self.assertIsInstance(response, list)
@slow
@require_torch
def test_large_model_pt(self):
    """Run the shared large-model checks against the PyTorch backend."""
    unmasker = pipeline(task='''fill-mask''', model='''distilroberta-base''', top_k=2, framework='''pt''')
    self.run_large_test(unmasker)
@slow
@require_tf
def test_large_model_tf(self):
    """Run the shared large-model checks against the TensorFlow backend."""
    unmasker = pipeline(task='''fill-mask''', model='''distilroberta-base''', top_k=2, framework='''tf''')
    self.run_large_test(unmasker)
def run_large_test(self, unmasker):
    """Shared assertions for the full `distilroberta-base` checkpoint (both backends)."""
    outputs = unmasker('''My name is <mask>''')
    self.assertEqual(
        nested_simplify(outputs),
        [
            {'''sequence''': '''My name is John''', '''score''': 0.0_0_8, '''token''': 6_1_0, '''token_str''': ''' John'''},
            {'''sequence''': '''My name is Chris''', '''score''': 0.0_0_7, '''token''': 1_5_7_3, '''token_str''': ''' Chris'''},
        ],
    )
    outputs = unmasker('''The largest city in France is <mask>''')
    self.assertEqual(
        nested_simplify(outputs),
        [
            {'''sequence''': '''The largest city in France is Paris''', '''score''': 0.2_5_1, '''token''': 2_2_0_1, '''token_str''': ''' Paris'''},
            {'''sequence''': '''The largest city in France is Lyon''', '''score''': 0.2_1_4, '''token''': 1_2_7_9_0, '''token_str''': ''' Lyon'''},
        ],
    )
    outputs = unmasker('''My name is <mask>''', targets=[''' Patrick''', ''' Clara''', ''' Teven'''], top_k=3)
    self.assertEqual(
        nested_simplify(outputs),
        [
            {'''sequence''': '''My name is Patrick''', '''score''': 0.0_0_5, '''token''': 3_4_9_9, '''token_str''': ''' Patrick'''},
            {'''sequence''': '''My name is Clara''', '''score''': 0.0_0_0, '''token''': 1_3_6_0_6, '''token_str''': ''' Clara'''},
            {'''sequence''': '''My name is Te''', '''score''': 0.0_0_0, '''token''': 2_9_4_1, '''token_str''': ''' Te'''},
        ],
    )
@require_torch
def test_model_no_pad_pt(self):
    """Pipeline must work when the tokenizer has no pad token (PyTorch)."""
    unmasker = pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', framework='''pt''')
    unmasker.tokenizer.pad_token_id = None
    unmasker.tokenizer.pad_token = None
    self.run_pipeline_test(unmasker, [])
@require_tf
def test_model_no_pad_tf(self):
    """Pipeline must work when the tokenizer has no pad token (TensorFlow)."""
    unmasker = pipeline(task='''fill-mask''', model='''sshleifer/tiny-distilroberta-base''', framework='''tf''')
    unmasker.tokenizer.pad_token_id = None
    unmasker.tokenizer.pad_token = None
    self.run_pipeline_test(unmasker, [])
def get_test_pipeline(self, model, tokenizer, processor):
    """Build a FillMaskPipeline plus example inputs for the generic pipeline test harness."""
    if tokenizer is None or tokenizer.mask_token_id is None:
        self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''')
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    examples = [
        F"This is another {tokenizer.mask_token} test",
    ]
    return fill_masker, examples
def run_pipeline_test(self, fill_masker, examples):
    """Generic checks run against every (model, tokenizer) combination."""
    tokenizer = fill_masker.tokenizer
    model = fill_masker.model

    def any_prediction():
        # One prediction dict whose exact values we don't pin down, only the field types.
        return {'''sequence''': ANY(str), '''score''': ANY(float), '''token''': ANY(int), '''token_str''': ANY(str)}

    outputs = fill_masker(F"This is a {tokenizer.mask_token}")
    self.assertEqual(outputs, [any_prediction() for _ in range(5)])

    outputs = fill_masker([F"This is a {tokenizer.mask_token}"])
    self.assertEqual(outputs, [any_prediction() for _ in range(5)])

    outputs = fill_masker([F"This is a {tokenizer.mask_token}", F"Another {tokenizer.mask_token} great test."])
    self.assertEqual(
        outputs,
        [
            [any_prediction() for _ in range(5)],
            [any_prediction() for _ in range(5)],
        ],
    )

    with self.assertRaises(ValueError):
        fill_masker([None])
    # No mask_token is not supported
    with self.assertRaises(PipelineException):
        fill_masker('''This is''')

    self.run_test_top_k(model, tokenizer)
    self.run_test_targets(model, tokenizer)
    self.run_test_top_k_targets(model, tokenizer)
    self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
    self.fill_mask_with_multiple_masks(model, tokenizer)
def run_test_targets(self, model, tokenizer):
    """`targets=` must restrict predictions, both as pipeline and call argument."""
    vocab = tokenizer.get_vocab()
    targets = sorted(vocab.keys())[:2]

    def any_prediction():
        return {'''sequence''': ANY(str), '''score''': ANY(float), '''token''': ANY(int), '''token_str''': ANY(str)}

    # Pipeline argument
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
    outputs = fill_masker(F"This is a {tokenizer.mask_token}")
    self.assertEqual(outputs, [any_prediction(), any_prediction()])
    target_ids = {vocab[el] for el in targets}
    self.assertEqual({el['''token'''] for el in outputs}, target_ids)
    processed_targets = [tokenizer.decode([x]) for x in target_ids]
    self.assertEqual({el['''token_str'''] for el in outputs}, set(processed_targets))

    # Call argument
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    outputs = fill_masker(F"This is a {tokenizer.mask_token}", targets=targets)
    self.assertEqual(outputs, [any_prediction(), any_prediction()])
    target_ids = {vocab[el] for el in targets}
    self.assertEqual({el['''token'''] for el in outputs}, target_ids)
    processed_targets = [tokenizer.decode([x]) for x in target_ids]
    self.assertEqual({el['''token_str'''] for el in outputs}, set(processed_targets))

    # Score equivalence
    outputs = fill_masker(F"This is a {tokenizer.mask_token}", targets=targets)
    tokens = [top_mask['''token_str'''] for top_mask in outputs]
    scores = [top_mask['''score'''] for top_mask in outputs]
    # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
    if set(tokens) == set(targets):
        unmasked_targets = fill_masker(F"This is a {tokenizer.mask_token}", targets=tokens)
        target_scores = [top_mask['''score'''] for top_mask in unmasked_targets]
        self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

    # Raises with invalid
    with self.assertRaises(ValueError):
        outputs = fill_masker(F"This is a {tokenizer.mask_token}", targets=[])
    # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
    if "" not in tokenizer.get_vocab():
        with self.assertRaises(ValueError):
            outputs = fill_masker(F"This is a {tokenizer.mask_token}", targets=[''''''])
        with self.assertRaises(ValueError):
            outputs = fill_masker(F"This is a {tokenizer.mask_token}", targets='''''')
def run_test_top_k(self, model, tokenizer):
    """`top_k` as a pipeline argument and as a call argument must agree."""

    def any_prediction():
        return {'''sequence''': ANY(str), '''score''': ANY(float), '''token''': ANY(int), '''token_str''': ANY(str)}

    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
    outputs = fill_masker(F"This is a {tokenizer.mask_token}")
    self.assertEqual(outputs, [any_prediction(), any_prediction()])

    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    outputs2 = fill_masker(F"This is a {tokenizer.mask_token}", top_k=2)
    self.assertEqual(outputs2, [any_prediction(), any_prediction()])

    self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def run_test_top_k_targets(self, model, tokenizer):
    """Combining `top_k` with `targets` must yield the same scores as re-targeting."""
    vocab = tokenizer.get_vocab()
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

    # top_k=2, ntargets=3
    targets = sorted(vocab.keys())[:3]
    outputs = fill_masker(F"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

    # If we use the most probably targets, and filter differently, we should still
    # have the same results
    targets2 = [el['''token_str'''] for el in sorted(outputs, key=lambda x: x['''score'''], reverse=True)]
    # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
    if set(targets2).issubset(targets):
        outputs2 = fill_masker(F"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
        # They should yield exactly the same result
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
    """Duplicate targets must be de-duplicated before applying `top_k`."""
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
    vocab = tokenizer.get_vocab()
    # String duplicates + id duplicates
    targets = sorted(vocab.keys())[:3]
    targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
    outputs = fill_masker(F"My name is {tokenizer.mask_token}", targets=targets, top_k=1_0)
    # The target list contains duplicates, so we can't output more
    # than them
    self.assertEqual(len(outputs), 3)
def fill_mask_with_multiple_masks(self, model, tokenizer):
    """An input with several mask tokens yields one top-k list per mask position."""
    fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

    def any_prediction():
        return {'''sequence''': ANY(str), '''score''': ANY(float), '''token''': ANY(int), '''token_str''': ANY(str)}

    outputs = fill_masker(
        F"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
    )
    self.assertEqual(
        outputs,
        [
            [any_prediction(), any_prediction()],
            [any_prediction(), any_prediction()],
            [any_prediction(), any_prediction()],
        ],
    )
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # Attributes the TokenizerTesterMixin reads to drive the common tokenizer tests.
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
def setUp(self):
    """Save a SentencePiece-backed tokenizer into the temp dir used by the common tests."""
    super().setUp()
    # Inlined fixture path: the module-level `__SCREAMING_SNAKE_CASE` constant would be
    # name-mangled inside a class body, so it cannot be referenced here.
    tokenizer = ReformerTokenizer(get_tests_dir('fixtures/test_sentencepiece.model'), keep_accents=True)
    tokenizer.save_pretrained(self.tmpdirname)
def test_convert_token_and_id(self):
    """`<s>` must round-trip through token<->id conversion as id 1."""
    token = "<s>"
    token_id = 1
    self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
    self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def UpperCamelCase ( self ):
_snake_case : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowercase_ ) , 1_000 )
def UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def UpperCamelCase ( self ):
if not self.test_rust_tokenizer:
return
_snake_case : Tuple = self.get_tokenizer()
_snake_case : List[str] = self.get_rust_tokenizer()
_snake_case : int = "I was born in 92000, and this is falsé."
_snake_case : Tuple = tokenizer.tokenize(lowercase_ )
_snake_case : List[Any] = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
_snake_case : Tuple = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
_snake_case : Dict = self.get_rust_tokenizer()
_snake_case : List[Any] = tokenizer.encode(lowercase_ )
_snake_case : str = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
def UpperCamelCase ( self , lowercase_=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
# Simple input
_snake_case : List[str] = "This is a simple input"
_snake_case : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_snake_case : Union[str, Any] = ("This is a simple input", "This is a pair")
_snake_case : int = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Simple input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
# Pair input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
_snake_case : Dict = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
_snake_case : Tuple = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] , )
_snake_case : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_snake_case : Any = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_snake_case : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def UpperCamelCase ( self ):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def UpperCamelCase ( self ):
_snake_case : int = "Hello World!"
_snake_case : Dict = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def UpperCamelCase ( self ):
_snake_case : Optional[int] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
_snake_case : Dict = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@require_torch
@slow
def UpperCamelCase ( self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
_snake_case : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
_snake_case : str = " ".join(lowercase_ )
_snake_case : Tuple = self.big_tokenizer.encode_plus(lowercase_ , return_tensors="pt" )
_snake_case : Tuple = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
_snake_case : int = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
_snake_case : Union[str, Any] = encoded_sequence["input_ids"].shape
_snake_case : List[str] = ReformerModel(lowercase_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowercase_ )
model(**lowercase_ )
@slow
def UpperCamelCase ( self ):
# fmt: off
_snake_case : Union[str, Any] = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
_snake_case : Tuple = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowercase_ , sequences=lowercase_ , ) | 670 | 0 |
"""simple docstring"""
def lowercase__(A ) ->int:
"""simple docstring"""
lowercase__ : Optional[int]= len(__lowercase )
lowercase__ : int= sum(__lowercase )
lowercase__ : List[str]= [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
lowercase__ : Union[str, Any]= True
for i in range(1 , s + 1 ):
lowercase__ : Tuple= False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
lowercase__ : List[Any]= dp[i][j - 1]
if arr[i - 1] <= j:
lowercase__ : Tuple= dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
lowercase__ : int= s - 2 * j
break
return diff
| 218 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
_snake_case : Any = tempfile.mkdtemp()
# fmt: off
_snake_case : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_snake_case : Dict = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
_snake_case : Dict = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_snake_case : Optional[int] = {"unk_token": "<unk>"}
_snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowercase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowercase_ ) )
_snake_case : Any = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_snake_case : Optional[Any] = os.path.join(self.tmpdirname , lowercase_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowercase_ , lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self , **lowercase_ ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
_snake_case : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_snake_case : Union[str, Any] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self ):
_snake_case : Tuple = self.get_tokenizer()
_snake_case : Any = self.get_rust_tokenizer()
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Any = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_slow.save_pretrained(self.tmpdirname )
_snake_case : Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
_snake_case : List[Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
processor_fast.save_pretrained(self.tmpdirname )
_snake_case : Optional[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase_ )
self.assertIsInstance(processor_fast.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : List[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_snake_case : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
_snake_case : Tuple = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase_ )
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.get_image_processor()
_snake_case : Any = self.get_tokenizer()
_snake_case : int = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = self.prepare_image_inputs()
_snake_case : Optional[Any] = image_processor(lowercase_ , return_tensors="np" )
_snake_case : str = processor(images=lowercase_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = self.get_image_processor()
_snake_case : Any = self.get_tokenizer()
_snake_case : Dict = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : List[str] = "lower newer"
_snake_case : int = processor(text=lowercase_ )
_snake_case : str = tokenizer(lowercase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self ):
_snake_case : List[Any] = self.get_image_processor()
_snake_case : int = self.get_tokenizer()
_snake_case : Tuple = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : List[Any] = "lower newer"
_snake_case : int = self.prepare_image_inputs()
_snake_case : Dict = processor(text=lowercase_ , images=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_image_processor()
_snake_case : List[str] = self.get_tokenizer()
_snake_case : Union[str, Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = self.prepare_image_inputs()
_snake_case : Dict = self.prepare_image_inputs()
_snake_case : List[Any] = processor(images=lowercase_ , visual_prompt=lowercase_ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowercase_ ):
processor()
def UpperCamelCase ( self ):
_snake_case : Dict = self.get_image_processor()
_snake_case : List[Any] = self.get_tokenizer()
_snake_case : str = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
_snake_case : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_snake_case : Any = processor.batch_decode(lowercase_ )
_snake_case : Any = tokenizer.batch_decode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ ) | 670 | 0 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'b0': efficientnet.EfficientNetBa,
'b1': efficientnet.EfficientNetBa,
'b2': efficientnet.EfficientNetBa,
'b3': efficientnet.EfficientNetBa,
'b4': efficientnet.EfficientNetBa,
'b5': efficientnet.EfficientNetBa,
'b6': efficientnet.EfficientNetBa,
'b7': efficientnet.EfficientNetBa,
}
lowerCamelCase_ = {
'b0': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 2_24,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 2_40,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 14_08,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 2_60,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 15_36,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 3_00,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 17_92,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 3_80,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 20_48,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 4_56,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 23_04,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 5_28,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 25_60,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 6_00,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> Any:
_SCREAMING_SNAKE_CASE = EfficientNetConfig()
_SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["hidden_dim"]
_SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["width_coef"]
_SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["depth_coef"]
_SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["image_size"]
_SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["dropout_rate"]
_SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["dw_padding"]
_SCREAMING_SNAKE_CASE = "huggingface/label-files"
_SCREAMING_SNAKE_CASE = "imagenet-1k-id2label.json"
_SCREAMING_SNAKE_CASE = 10_00
_SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) )
_SCREAMING_SNAKE_CASE = {int(__lowercase ): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE = idalabel
_SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE_ ( ) -> Tuple:
_SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg"
_SCREAMING_SNAKE_CASE = Image.open(requests.get(__lowercase , stream=__lowercase ).raw )
return im
def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] ) -> Tuple:
_SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["image_size"]
_SCREAMING_SNAKE_CASE = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=__lowercase , )
return preprocessor
def SCREAMING_SNAKE_CASE_ ( __A : Any ) -> Tuple:
_SCREAMING_SNAKE_CASE = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
_SCREAMING_SNAKE_CASE = sorted(set(__lowercase ) )
_SCREAMING_SNAKE_CASE = len(__lowercase )
_SCREAMING_SNAKE_CASE = {b: str(__lowercase ) for b, i in zip(__lowercase , range(__lowercase ) )}
_SCREAMING_SNAKE_CASE = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
_SCREAMING_SNAKE_CASE = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
_SCREAMING_SNAKE_CASE = {}
for item in rename_keys:
if item[0] in original_param_names:
_SCREAMING_SNAKE_CASE = "efficientnet." + item[1]
_SCREAMING_SNAKE_CASE = "classifier.weight"
_SCREAMING_SNAKE_CASE = "classifier.bias"
return key_mapping
def SCREAMING_SNAKE_CASE_ ( __A : Dict , __A : Union[str, Any] , __A : Dict ) -> int:
for key, value in tf_params.items():
if "normalization" in key:
continue
_SCREAMING_SNAKE_CASE = key_mapping[key]
if "_conv" in key and "kernel" in key:
_SCREAMING_SNAKE_CASE = torch.from_numpy(__lowercase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
_SCREAMING_SNAKE_CASE = torch.from_numpy(__lowercase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
_SCREAMING_SNAKE_CASE = torch.from_numpy(np.transpose(__lowercase ) )
else:
_SCREAMING_SNAKE_CASE = torch.from_numpy(__lowercase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(__lowercase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( __A : List[Any] , __A : Dict , __A : Tuple , __A : List[str] ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = model_classes[model_name](
include_top=__lowercase , weights="imagenet" , input_tensor=__lowercase , input_shape=__lowercase , pooling=__lowercase , classes=10_00 , classifier_activation="softmax" , )
_SCREAMING_SNAKE_CASE = original_model.trainable_variables
_SCREAMING_SNAKE_CASE = original_model.non_trainable_variables
_SCREAMING_SNAKE_CASE = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
_SCREAMING_SNAKE_CASE = param.numpy()
_SCREAMING_SNAKE_CASE = list(tf_params.keys() )
# Load HuggingFace model
_SCREAMING_SNAKE_CASE = get_efficientnet_config(__lowercase )
_SCREAMING_SNAKE_CASE = EfficientNetForImageClassification(__lowercase ).eval()
_SCREAMING_SNAKE_CASE = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
_SCREAMING_SNAKE_CASE = rename_keys(__lowercase )
replace_params(__lowercase , __lowercase , __lowercase )
# Initialize preprocessor and preprocess input image
_SCREAMING_SNAKE_CASE = convert_image_processor(__lowercase )
_SCREAMING_SNAKE_CASE = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE = hf_model(**__lowercase )
_SCREAMING_SNAKE_CASE = outputs.logits.detach().numpy()
# Original model inference
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["image_size"]
_SCREAMING_SNAKE_CASE = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
_SCREAMING_SNAKE_CASE = image.img_to_array(__lowercase )
_SCREAMING_SNAKE_CASE = np.expand_dims(__lowercase , axis=0 )
_SCREAMING_SNAKE_CASE = original_model.predict(__lowercase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(__lowercase , __lowercase , atol=1e-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(__lowercase ):
os.mkdir(__lowercase )
# Save converted model and image processor
hf_model.save_pretrained(__lowercase )
preprocessor.save_pretrained(__lowercase )
if push_to_hub:
# Push model and image processor to hub
print(f"""Pushing converted {model_name} to the hub...""" )
_SCREAMING_SNAKE_CASE = f"""efficientnet-{model_name}"""
preprocessor.push_to_hub(__lowercase )
hf_model.push_to_hub(__lowercase )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
lowerCamelCase_ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 418 | from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(__lowercase ):
return ext
raise Exception(
F"""Unable to determine file format from file extension {path}. """
F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
_snake_case : int = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
_snake_case : List[Any] = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
_snake_case : Optional[int] = PipelineDataFormat.from_str(
format=__lowercase , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(__lowercase , __lowercase )
class lowercase_ ( __snake_case ):
def __init__( self , lowercase_ , lowercase_ ):
_snake_case : str = nlp
_snake_case : str = reader
@staticmethod
def UpperCamelCase ( lowercase_ ):
_snake_case : Dict = parser.add_parser("run" , help="Run a pipeline through the CLI" )
run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
run_parser.add_argument("--input" , type=lowercase_ , help="Path to the file to use for inference" )
run_parser.add_argument("--output" , type=lowercase_ , help="Path to the file that will be used post to write results." )
run_parser.add_argument("--model" , type=lowercase_ , help="Name or path to the model to instantiate." )
run_parser.add_argument("--config" , type=lowercase_ , help="Name or path to the model's config to instantiate." )
run_parser.add_argument(
"--tokenizer" , type=lowercase_ , help="Name of the tokenizer to use. (default: same as the model name)" )
run_parser.add_argument(
"--column" , type=lowercase_ , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
run_parser.add_argument(
"--format" , type=lowercase_ , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
run_parser.add_argument(
"--device" , type=lowercase_ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
run_parser.set_defaults(func=lowercase_ )
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Tuple = self._nlp, []
for entry in self._reader:
_snake_case : Optional[Any] = nlp(**lowercase_ ) if self._reader.is_multi_columns else nlp(lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
outputs.append(lowercase_ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
_snake_case : str = self._reader.save_binary(lowercase_ )
logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
else:
self._reader.save(lowercase_ ) | 670 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__A = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 593 | import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
    """Wrapper that runs several ControlNet models and merges their residuals.

    NOTE(review): the method signatures below repeat the parameter name
    `lowercase_`, which is a SyntaxError in Python, and bodies read names
    (`down_samples`, `idx`, `controlnets`, `model_path_to_save`, ...) that are
    never bound — the original identifiers were destroyed by an automated
    rename.  Restore them (compare diffusers' MultiControlNetModel) before
    this module can import.
    """

    def __init__( self , lowercase_ ):
        super().__init__()
        # Register the individual ControlNets as sub-modules.
        _snake_case : List[str] = nn.ModuleList(lowercase_ )

    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = True , ):
        # Run each ControlNet on its (image, scale) pair and accumulate the
        # per-block residuals elementwise.
        for i, (image, scale, controlnet) in enumerate(zip(lowercase_ , lowercase_ , self.nets ) ):
            _snake_case ,_snake_case : Optional[int] = controlnet(
                lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )

            # merge samples
            if i == 0:
                _snake_case ,_snake_case : Tuple = down_samples, mid_sample
            else:
                _snake_case : Tuple = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(lowercase_ , lowercase_ )
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def UpperCamelCase ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , ):
        # Save each ControlNet under `<dir>`, `<dir>_1`, `<dir>_2`, ...
        _snake_case : Tuple = 0
        _snake_case : Dict = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                lowercase_ , is_main_process=lowercase_ , save_function=lowercase_ , safe_serialization=lowercase_ , variant=lowercase_ , )

            idx += 1
            _snake_case : int = model_path_to_save + f"""_{idx}"""

    @classmethod
    def UpperCamelCase ( cls , lowercase_ , **lowercase_ ):
        _snake_case : List[str] = 0
        _snake_case : Optional[Any] = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        _snake_case : Optional[Any] = pretrained_model_path
        while os.path.isdir(lowercase_ ):
            _snake_case : int = ControlNetModel.from_pretrained(lowercase_ , **lowercase_ )
            controlnets.append(lowercase_ )
            idx += 1
            _snake_case : str = pretrained_model_path + f"""_{idx}"""
        logger.info(f"""{len(lowercase_ )} controlnets loaded from {pretrained_model_path}.""" )
        if len(lowercase_ ) == 0:
            raise ValueError(
                f"""No ControlNets found under {os.path.dirname(lowercase_ )}. Expected at least {pretrained_model_path + '_0'}.""" )
        return cls(lowercase_ )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module logger and pretrained-config archive map.
# NOTE(review): both assignments bind the same name `A_`, so the logger is
# clobbered by the URL map — rename damage; the class below presumably needs
# both under distinct names.
A_ = logging.get_logger(__name__)
A_ = {
    'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}
class snake_case ( __snake_case , __snake_case ):
    """Configuration class for a DiNAT (Dilated Neighborhood Attention) model.

    NOTE(review): the `__init__` signature repeats the parameter name
    `lowerCAmelCase_` (a SyntaxError), every assignment targets the same
    `SCREAMING_SNAKE_CASE_` local, and the right-hand sides read names
    (`patch_size`, `depths`, ...) that are never bound — the original
    attribute assignments were destroyed by an automated rename.
    """

    # NOTE(review): both class attributes bind `UpperCAmelCase`; the second
    # (the attribute-name map) clobbers the `model_type` string.
    UpperCAmelCase : Union[str, Any] = """dinat"""
    UpperCAmelCase : str = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__( self : List[Any] , lowerCAmelCase_ : int=4 , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Any=64 , lowerCAmelCase_ : List[str]=[3, 4, 6, 5] , lowerCAmelCase_ : Dict=[2, 4, 8, 16] , lowerCAmelCase_ : List[Any]=7 , lowerCAmelCase_ : Optional[int]=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , lowerCAmelCase_ : Any=3.0 , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Optional[Any]=0.0 , lowerCAmelCase_ : int=0.0 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Optional[Any]="gelu" , lowerCAmelCase_ : Optional[int]=0.02 , lowerCAmelCase_ : Dict=1e-5 , lowerCAmelCase_ : Optional[Any]=0.0 , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Union[str, Any]=None , **lowerCAmelCase_ : int , ) -> Any:
        """Store the architecture hyper-parameters on the config instance."""
        super().__init__(**lowercase_ )
        SCREAMING_SNAKE_CASE_ = patch_size
        SCREAMING_SNAKE_CASE_ = num_channels
        SCREAMING_SNAKE_CASE_ = embed_dim
        SCREAMING_SNAKE_CASE_ = depths
        SCREAMING_SNAKE_CASE_ = len(lowercase_ )
        SCREAMING_SNAKE_CASE_ = num_heads
        SCREAMING_SNAKE_CASE_ = kernel_size
        SCREAMING_SNAKE_CASE_ = dilations
        SCREAMING_SNAKE_CASE_ = mlp_ratio
        SCREAMING_SNAKE_CASE_ = qkv_bias
        SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_ = drop_path_rate
        SCREAMING_SNAKE_CASE_ = hidden_act
        SCREAMING_SNAKE_CASE_ = layer_norm_eps
        SCREAMING_SNAKE_CASE_ = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        SCREAMING_SNAKE_CASE_ = int(embed_dim * 2 ** (len(lowercase_ ) - 1) )
        SCREAMING_SNAKE_CASE_ = layer_scale_init_value
        SCREAMING_SNAKE_CASE_ = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(lowercase_ ) + 1 )]
        SCREAMING_SNAKE_CASE_ = get_aligned_output_features_output_indices(
            out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names )
| 393 | import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( __snake_case ):
    """Processor pairing a CLIP image processor with an XLM-Roberta tokenizer.

    NOTE(review): the signatures below repeat the parameter name `lowercase_`
    (a SyntaxError) and the bodies read names (`kwargs`, `image_processor`,
    `tokenizer`, `text`, `images`, `encoding`, ...) that are never bound —
    rename damage; restore the original parameter/local names before use.
    """

    # ProcessorMixin-style class attributes; all three bind `_lowerCamelCase`,
    # so only the last survives — presumably `attributes`,
    # `image_processor_class` and `tokenizer_class` originally.
    _lowerCamelCase = ['image_processor', 'tokenizer']
    _lowerCamelCase = 'CLIPImageProcessor'
    _lowerCamelCase = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__( self , lowercase_=None , lowercase_=None , **lowercase_ ):
        _snake_case : Optional[Any] = None
        if "feature_extractor" in kwargs:
            # Back-compat shim: `feature_extractor` was renamed `image_processor`.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , lowercase_ , )
            _snake_case : Dict = kwargs.pop("feature_extractor" )
        _snake_case : Any = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(lowercase_ , lowercase_ )

    def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ):
        # Tokenize text and/or preprocess images, merging pixel values into
        # the text encoding when both are given.
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            _snake_case : str = self.tokenizer(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
        if images is not None:
            _snake_case : List[str] = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
        if text is not None and images is not None:
            _snake_case : Tuple = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )

    def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
        # Delegate batch decoding to the tokenizer.
        return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )

    def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
        # Delegate single-sequence decoding to the tokenizer.
        return self.tokenizer.decode(*lowercase_ , **lowercase_ )

    @property
    def UpperCamelCase ( self ):
        # Union of tokenizer and image-processor input names, order-preserving.
        _snake_case : Any = self.tokenizer.model_input_names
        _snake_case : int = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
'''simple docstring'''
import numpy as np
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 1e-1_2 , _lowerCAmelCase = 1_00 , )-> tuple[float, np.ndarray]:
assert np.shape(__lowercase )[0] == np.shape(__lowercase )[1]
# Ensure proper dimensionality.
assert np.shape(__lowercase )[0] == np.shape(__lowercase )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(__lowercase ) == np.iscomplexobj(__lowercase )
__UpperCAmelCase = np.iscomplexobj(__lowercase )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(__lowercase , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
__UpperCAmelCase = False
__UpperCAmelCase = 0
__UpperCAmelCase = 0
__UpperCAmelCase = 1e1_2
while not convergence:
# Multiple matrix by the vector.
__UpperCAmelCase = np.dot(__lowercase , __lowercase )
# Normalize the resulting output vector.
__UpperCAmelCase = w / np.linalg.norm(__lowercase )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
__UpperCAmelCase = vector.conj().T if is_complex else vector.T
__UpperCAmelCase = np.dot(__lowercase , np.dot(__lowercase , __lowercase ) )
# Check convergence.
__UpperCAmelCase = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
__UpperCAmelCase = True
__UpperCAmelCase = lambda_
if is_complex:
__UpperCAmelCase = np.real(lambda_ )
return lambda_, vector
def _lowerCAmelCase ( )-> None:
    """Self-test comparing power iteration with numpy.linalg.eigh.

    NOTE(review): this function's body is rename-damaged — every assignment
    targets `__UpperCAmelCase` while the code reads `real_input_matrix`,
    `input_matrix`, `eigen_values`, etc. (all unbound), `np.complexaaa` is a
    garbling of a complex dtype (presumably `np.complex128`), and
    `power_iteration` is not defined in this module (both functions here are
    named `_lowerCAmelCase`, the second shadowing the first).  Restore the
    original names before running.
    """
    # Real symmetric test matrix and starting vector.
    __UpperCAmelCase = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    __UpperCAmelCase = np.array([41, 4, 20] )
    # Build a Hermitian complex variant of the same matrix.
    __UpperCAmelCase = real_input_matrix.astype(np.complexaaa )
    __UpperCAmelCase = np.triu(1j * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    __UpperCAmelCase = np.array([41, 4, 20] ).astype(np.complexaaa )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            __UpperCAmelCase = real_input_matrix
            __UpperCAmelCase = real_vector
        elif problem_type == "complex":
            __UpperCAmelCase = complex_input_matrix
            __UpperCAmelCase = complex_vector

        # Our implementation.
        __UpperCAmelCase = power_iteration(__lowercase , __lowercase )

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        __UpperCAmelCase = np.linalg.eigh(__lowercase )
        # Last eigenvalue is the maximum one.
        __UpperCAmelCase = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        __UpperCAmelCase = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(__lowercase ) - np.abs(__lowercase ) ) <= 1e-6
if __name__ == "__main__":
    # Run doctests, then the module self-test.
    import doctest

    doctest.testmod()
    # NOTE(review): `test_power_iteration` is not defined anywhere in this
    # module (the self-test above is named `_lowerCAmelCase`), so this call
    # raises NameError — restore the original function names.
    test_power_iteration()
| 126 | from __future__ import annotations
def snake_case (__lowercase , __lowercase , __lowercase ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod() | 670 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger and pretrained-config archive map.
# NOTE(review): both assignments bind `_SCREAMING_SNAKE_CASE`, so the logger
# is clobbered by the URL map — rename damage; the class below reads
# `logger`, which is consequently undefined.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
    'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class __magic_name__ ( __snake_case ):
    """Configuration class for a Deformable DETR model.

    NOTE(review): the `__init__` signature repeats the parameter name
    `snake_case_` dozens of times (a SyntaxError) and the body assigns
    everything to `__snake_case` while reading the intended names
    (`use_timm_backbone`, `d_model`, ...) — rename damage; restore the
    original parameter and attribute names before this module can import.
    """

    _SCREAMING_SNAKE_CASE : Optional[int] = 'deformable_detr'
    _SCREAMING_SNAKE_CASE : Optional[int] = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__( self : int , snake_case_ : Optional[int]=True , snake_case_ : int=None , snake_case_ : Union[str, Any]=3 , snake_case_ : str=300 , snake_case_ : Union[str, Any]=1024 , snake_case_ : List[str]=6 , snake_case_ : int=1024 , snake_case_ : Tuple=8 , snake_case_ : List[Any]=6 , snake_case_ : Any=1024 , snake_case_ : Tuple=8 , snake_case_ : Union[str, Any]=0.0 , snake_case_ : List[Any]=True , snake_case_ : List[Any]="relu" , snake_case_ : Any=256 , snake_case_ : Tuple=0.1 , snake_case_ : Optional[int]=0.0 , snake_case_ : str=0.0 , snake_case_ : int=0.02 , snake_case_ : Optional[Any]=1.0 , snake_case_ : int=True , snake_case_ : Tuple=False , snake_case_ : List[Any]="sine" , snake_case_ : Any="resnet50" , snake_case_ : Union[str, Any]=True , snake_case_ : int=False , snake_case_ : Union[str, Any]=4 , snake_case_ : Tuple=4 , snake_case_ : Any=4 , snake_case_ : Tuple=False , snake_case_ : Tuple=300 , snake_case_ : Union[str, Any]=False , snake_case_ : Tuple=1 , snake_case_ : str=5 , snake_case_ : Dict=2 , snake_case_ : List[Any]=1 , snake_case_ : str=1 , snake_case_ : Optional[int]=5 , snake_case_ : Tuple=2 , snake_case_ : Optional[int]=0.1 , snake_case_ : str=0.25 , snake_case_ : Optional[Any]=False , **snake_case_ : List[Any] , ):
        """Store the architecture, matcher and loss hyper-parameters."""
        # Either a timm backbone or an explicit backbone config — not both.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                __snake_case = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(lowercase_ , lowercase_ ):
                # A plain dict was given: rebuild the proper config class.
                __snake_case = backbone_config.get("model_type" )
                __snake_case = CONFIG_MAPPING[backbone_model_type]
                __snake_case = config_class.from_dict(lowercase_ )
        __snake_case = use_timm_backbone
        __snake_case = backbone_config
        __snake_case = num_channels
        __snake_case = num_queries
        __snake_case = max_position_embeddings
        __snake_case = d_model
        __snake_case = encoder_ffn_dim
        __snake_case = encoder_layers
        __snake_case = encoder_attention_heads
        __snake_case = decoder_ffn_dim
        __snake_case = decoder_layers
        __snake_case = decoder_attention_heads
        __snake_case = dropout
        __snake_case = attention_dropout
        __snake_case = activation_dropout
        __snake_case = activation_function
        __snake_case = init_std
        __snake_case = init_xavier_std
        __snake_case = encoder_layerdrop
        __snake_case = auxiliary_loss
        __snake_case = position_embedding_type
        __snake_case = backbone
        __snake_case = use_pretrained_backbone
        __snake_case = dilation
        # deformable attributes
        __snake_case = num_feature_levels
        __snake_case = encoder_n_points
        __snake_case = decoder_n_points
        __snake_case = two_stage
        __snake_case = two_stage_num_proposals
        __snake_case = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        __snake_case = class_cost
        __snake_case = bbox_cost
        __snake_case = giou_cost
        # Loss coefficients
        __snake_case = mask_loss_coefficient
        __snake_case = dice_loss_coefficient
        __snake_case = bbox_loss_coefficient
        __snake_case = giou_loss_coefficient
        __snake_case = eos_coefficient
        __snake_case = focal_alpha
        __snake_case = disable_custom_kernels
        super().__init__(is_encoder_decoder=lowercase_ , **lowercase_ )

    @property
    def lowerCAmelCase ( self : List[str] ):
        # Alias kept for the `num_attention_heads` attribute map above.
        return self.encoder_attention_heads

    @property
    def lowerCAmelCase ( self : Tuple ):
        # Alias kept for the `hidden_size` attribute map above.
        return self.d_model

    def lowerCAmelCase ( self : Dict ):
        # Serialize to a plain dict, expanding the nested backbone config.
        __snake_case = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            __snake_case = self.backbone_config.to_dict()
        __snake_case = self.__class__.model_type
        return output
| 163 | import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def snake_case (*__lowercase ) -> list:
    """Release references to the given objects and empty the device cache.

    Copies the var-positional arguments into a list, overwrites every entry
    with ``None`` so the garbage collector can reclaim the objects, clears
    the accelerator cache (XPU / NPU / CUDA), and returns the list of
    ``None`` placeholders for the caller to rebind over its own names.

    Fixes in this revision: the original called ``isinstance(x, x)`` (a
    runtime TypeError), assigned the cleared entries to a throwaway local
    instead of the list slots, and returned the undefined name ``objects``.
    """
    # The *args tuple is immutable; copy into a list so entries can be
    # overwritten with None.
    objects = list(__lowercase)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    # Clear whichever accelerator backend is present; `torch.cuda.empty_cache`
    # is a safe no-op when CUDA was never initialized.
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def snake_case (__lowercase ) -> bool:
    """Return True if `__lowercase` is an out-of-memory style exception.

    Recognizes the RuntimeError messages torch raises for CUDA OOM, the
    cuDNN "not supported" failure mode, and CPU allocator exhaustion.

    Fixes in this revision: the original called ``isinstance(x, x)`` and
    read the undefined names ``exception`` / ``_statements``.
    """
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    # Torch reports allocation failures as RuntimeError with a single
    # message argument; anything else is not treated as OOM.
    if isinstance(__lowercase, RuntimeError) and len(__lowercase.args) == 1:
        return any(err in __lowercase.args[0] for err in _statements)
    return False
def snake_case (function = None , starting_batch_size = 128 ) -> Any:
    """Decorator that retries `function` with a halved batch size on OOM.

    `function` must take the batch size as its first argument; the decorator
    supplies it, starting at `starting_batch_size` and halving after every
    out-of-memory failure until the call succeeds or the size reaches zero.
    May be used bare (``@snake_case``) or with arguments
    (``@snake_case(starting_batch_size=64)``).

    Fixes in this revision: the original signature repeated the parameter
    name ``__lowercase`` (a SyntaxError) and the body read the undefined
    names ``batch_size``, ``params`` and ``should_reduce_batch_size``; the
    OOM test is inlined here because no helper with that name exists in
    this module.
    """
    if function is None:
        # Called with arguments: return a decorator bound to the batch size.
        return functools.partial(snake_case, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def _empty_device_cache():
        # Free cached allocator blocks on whichever backend is present.
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()

    def _is_oom(exception):
        # Out-of-memory failures surface as single-argument RuntimeErrors.
        _statements = [
            "CUDA out of memory.",  # CUDA OOM
            "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
            "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
        ]
        if isinstance(exception, RuntimeError) and len(exception.args) == 1:
            return any(err in exception.args[0] for err in _statements)
        return False

    def decorator(*args, **kwargs):
        nonlocal batch_size
        _empty_device_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error: the batch size must NOT be passed by the
        # caller — the decorator injects it.
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"""{arg}={value}""" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"""Batch size was passed into `{function.__name__}` as the first argument when called."""
                f"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""")
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if _is_oom(e):
                    # Free what we can and retry with half the batch size.
                    _empty_device_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Tokenizer resources for the Longformer checkpoints.
# NOTE(review): all four assignments bind the same name `_lowerCAmelCase`
# (logger, vocab file names, download map, max sizes), each clobbering the
# previous — rename damage; the tokenizer class below reads the intended
# distinct names (`VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`, ...).
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
_lowerCAmelCase = {
    'vocab_file': {
        'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
        'allenai/longformer-large-4096': (
            'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
        ),
        'allenai/longformer-large-4096-finetuned-triviaqa': (
            'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
        ),
        'allenai/longformer-base-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
        ),
        'allenai/longformer-large-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
        ),
    },
    'merges_file': {
        'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
        'allenai/longformer-large-4096': (
            'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
        ),
        'allenai/longformer-large-4096-finetuned-triviaqa': (
            'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
        ),
        'allenai/longformer-base-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
        ),
        'allenai/longformer-large-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
        ),
    },
}
# Maximum model input sizes (positions) per checkpoint.
_lowerCAmelCase = {
    'allenai/longformer-base-4096': 4_096,
    'allenai/longformer-large-4096': 4_096,
    'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
    'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
    'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def _lowerCAmelCase ( ):
'''simple docstring'''
A_ : Optional[int] = (
list(range(ord("""!""" ) ,ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) ,ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) ,ord("""ÿ""" ) + 1 ) )
)
A_ : Any = bs[:]
A_ : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__lowercase )
cs.append(2**8 + n )
n += 1
A_ : int = [chr(__lowercase ) for n in cs]
return dict(zip(__lowercase ,__lowercase ) )
def _lowerCAmelCase ( _lowerCAmelCase ):
'''simple docstring'''
A_ : Dict = set()
A_ : Any = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A_ : Tuple = char
return pairs
class _UpperCAmelCase ( __snake_case ):
    """GPT-2-style byte-level BPE tokenizer (Longformer variant).

    NOTE(review): every multi-parameter signature below repeats the name
    ``a__`` (a SyntaxError) and the bodies assign to the single local ``A_``
    while reading the intended names (``bos_token``, ``word``, ``i``, ...) —
    an automated rename destroyed the identifiers.  The control flow and
    string/regex literals are intact; restore the names against the
    canonical RoBERTa tokenizer before this module can import.
    """

    a = VOCAB_FILES_NAMES
    a = PRETRAINED_VOCAB_FILES_MAP
    a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a = ['''input_ids''', '''attention_mask''']

    def __init__( self , a__ , a__ , a__="replace" , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__=False , **a__ , ):
        # Normalize the special tokens to AddedToken instances.
        A_ : Tuple = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else bos_token
        A_ : Any = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else eos_token
        A_ : List[str] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else sep_token
        A_ : Any = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else cls_token
        A_ : int = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else unk_token
        A_ : Union[str, Any] = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        A_ : str = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token

        super().__init__(
            errors=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , )

        # Load the token->id vocabulary and its inverse.
        with open(lowercase_ , encoding="""utf-8""" ) as vocab_handle:
            A_ : Tuple = json.load(lowercase_ )
        A_ : Any = {v: k for k, v in self.encoder.items()}
        A_ : List[Any] = errors  # how to handle errors in decoding
        # Byte <-> printable-unicode tables for byte-level BPE.
        A_ : Optional[int] = bytes_to_unicode()
        A_ : int = {v: k for k, v in self.byte_encoder.items()}
        # Load the ranked merge list (first line is a version header).
        with open(lowercase_ , encoding="""utf-8""" ) as merges_handle:
            A_ : List[str] = merges_handle.read().split("""\n""" )[1:-1]
        A_ : Tuple = [tuple(merge.split() ) for merge in bpe_merges]
        A_ : Any = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        A_ : List[str] = {}
        A_ : int = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        A_ : Union[str, Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )

    @property
    def _lowerCamelCase ( self ):
        # Vocabulary size (without added tokens).
        return len(self.encoder )

    def _lowerCamelCase ( self ):
        # Full vocabulary including added tokens.
        return dict(self.encoder , **self.added_tokens_encoder )

    def _lowerCamelCase ( self , a__ ):
        # Apply byte-pair merges to one pre-tokenized chunk, with memoization.
        if token in self.cache:
            return self.cache[token]
        A_ : Optional[int] = tuple(lowercase_ )
        A_ : Union[str, Any] = get_pairs(lowercase_ )

        if not pairs:
            return token

        while True:
            # Pick the lowest-ranked (most frequent) adjacent pair.
            A_ : Tuple = min(lowercase_ , key=lambda a__ : self.bpe_ranks.get(lowercase_ , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            A_ : Any = bigram
            A_ : int = []
            A_ : Union[str, Any] = 0
            while i < len(lowercase_ ):
                try:
                    A_ : Optional[int] = word.index(lowercase_ , lowercase_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    A_ : str = j

                if word[i] == first and i < len(lowercase_ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            A_ : str = tuple(lowercase_ )
            A_ : List[str] = new_word
            if len(lowercase_ ) == 1:
                break
            else:
                A_ : Optional[Any] = get_pairs(lowercase_ )
        A_ : Dict = " ".join(lowercase_ )
        A_ : str = word
        return word

    def _lowerCamelCase ( self , a__ ):
        # Pre-tokenize with the GPT-2 regex, byte-encode, then BPE each chunk.
        A_ : Tuple = []
        for token in re.findall(self.pat , lowercase_ ):
            A_ : Union[str, Any] = "".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowercase_ ).split(""" """ ) )
        return bpe_tokens

    def _lowerCamelCase ( self , a__ ):
        # Token string -> id (unk fallback).
        return self.encoder.get(lowercase_ , self.encoder.get(self.unk_token ) )

    def _lowerCamelCase ( self , a__ ):
        # Id -> token string.
        return self.decoder.get(lowercase_ )

    def _lowerCamelCase ( self , a__ ):
        # Token strings -> original text via the byte decoder table.
        A_ : Union[str, Any] = "".join(lowercase_ )
        A_ : str = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
        return text

    def _lowerCamelCase ( self , a__ , a__ = None ):
        # Write vocab.json and merges.txt into `save_directory`.
        if not os.path.isdir(lowercase_ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        A_ : List[Any] = os.path.join(
            lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        A_ : Any = os.path.join(
            lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )

        with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + """\n""" )

        A_ : List[Any] = 0
        with open(lowercase_ , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            # Merges must be written in rank order; warn if ranks have gaps.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda a__ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        """ Please check that the tokenizer is not corrupted!""" )
                    A_ : Optional[Any] = token_index
                writer.write(""" """.join(lowercase_ ) + """\n""" )
                index += 1

        return vocab_file, merge_file

    def _lowerCamelCase ( self , a__ , a__ = None ):
        # <s> A </s> for one sequence, <s> A </s></s> B </s> for a pair.
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        A_ : Any = [self.cls_token_id]
        A_ : Union[str, Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def _lowerCamelCase ( self , a__ , a__ = None , a__ = False ):
        # 1 marks special tokens, 0 marks sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )

        if token_ids_a is None:
            return [1] + ([0] * len(lowercase_ )) + [1]
        return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1]

    def _lowerCamelCase ( self , a__ , a__ = None ):
        # RoBERTa-style models do not use token type ids: all zeros.
        A_ : List[Any] = [self.sep_token_id]
        A_ : int = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def _lowerCamelCase ( self , a__ , a__=False , **a__ ):
        # Optionally prepend a space so the first word is merged like any other.
        A_ : str = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(lowercase_ ) > 0 and not text[0].isspace()):
            A_ : List[Any] = " " + text
        return (text, kwargs)
# Baconian-cipher alphabet: each letter maps to a five-character A/B group;
# spaces map to themselves.  (This variant gives 'j' and 'v' their own codes.)
# NOTE(review): both tables are bound to `__SCREAMING_SNAKE_CASE`, yet the
# functions below read `encode_dict` / `decode_dict` — rename damage.
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
    'a': 'AAAAA',
    'b': 'AAAAB',
    'c': 'AAABA',
    'd': 'AAABB',
    'e': 'AABAA',
    'f': 'AABAB',
    'g': 'AABBA',
    'h': 'AABBB',
    'i': 'ABAAA',
    'j': 'BBBAA',
    'k': 'ABAAB',
    'l': 'ABABA',
    'm': 'ABABB',
    'n': 'ABBAA',
    'o': 'ABBAB',
    'p': 'ABBBA',
    'q': 'ABBBB',
    'r': 'BAAAA',
    's': 'BAAAB',
    't': 'BAABA',
    'u': 'BAABB',
    'v': 'BBBAB',
    'w': 'BABAA',
    'x': 'BABAB',
    'y': 'BABBA',
    'z': 'BABBB',
    ' ': ' ',
}
# Inverse table: A/B group -> letter.
__SCREAMING_SNAKE_CASE : int = {value: key for key, value in encode_dict.items()}
def snake_case (__lowercase ) -> str:
    """Encode a string with the Baconian (A/B) cipher.

    Lower-cases the input; letters become five-character A/B groups via
    ``encode_dict`` and spaces pass through.  Anything else raises.

    Fixes in this revision: the original iterated over the undefined name
    ``word`` and accumulated into the undefined name ``encoded``.
    """
    encoded = ""
    for letter in __lowercase.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces" )
    return encoded
def snake_case (__lowercase ) -> str:
    """Decode a Baconian-cipher string back to plain text.

    Accepts only 'A', 'B' and spaces; each space-separated group is consumed
    five characters at a time through ``decode_dict``.

    Fixes in this revision: the original split the undefined name ``coded``
    and accumulated into the undefined name ``decoded``.
    """
    if set(__lowercase ) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces" )
    decoded = ""
    for word in __lowercase.split():
        # Consume the group five characters at a time; one decoded letter each.
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
    # Run the module doctests when executed as a script.
    from doctest import testmod

    testmod()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : List[Any] =logging.get_logger(__name__)
A_ : Union[str, Any] ={
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class lowercase_ ( __snake_case):
    """Configuration class for a Switch Transformers (MoE T5) model.

    NOTE(review): the `__init__` signature repeats the parameter name
    `_UpperCAmelCase` (a SyntaxError) and every assignment targets the single
    local `a_` while reading the intended names (`vocab_size`, `d_model`,
    ...) — rename damage; restore the original names before this module can
    import.
    """
    snake_case_ = '''switch_transformers'''
    snake_case_ = ['''past_key_values''']
    snake_case_ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}

    def __init__( self , _UpperCAmelCase=32_128 , _UpperCAmelCase=768 , _UpperCAmelCase=64 , _UpperCAmelCase=2_048 , _UpperCAmelCase=64 , _UpperCAmelCase=12 , _UpperCAmelCase=3 , _UpperCAmelCase=12 , _UpperCAmelCase=3 , _UpperCAmelCase=12 , _UpperCAmelCase=8 , _UpperCAmelCase=False , _UpperCAmelCase=0.0_1 , _UpperCAmelCase="float32" , _UpperCAmelCase=False , _UpperCAmelCase=32 , _UpperCAmelCase=128 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=0.0_0_1 , _UpperCAmelCase=0.0_0_1 , _UpperCAmelCase=1.0 , _UpperCAmelCase="relu" , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , **_UpperCAmelCase , ):
        """Store transformer, sparse-routing and loss hyper-parameters."""
        a_ = vocab_size
        a_ = d_model
        a_ = d_kv
        a_ = d_ff
        a_ = num_sparse_encoder_layers
        a_ = num_layers
        a_ = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        a_ = num_sparse_decoder_layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            a_ = self.num_layers // self.num_sparse_encoder_layers
        else:
            a_ = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            a_ = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            a_ = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        a_ = num_heads
        a_ = num_experts
        a_ = expert_capacity
        a_ = router_bias
        a_ = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
        a_ = router_dtype

        a_ = router_ignore_padding_tokens
        a_ = relative_attention_num_buckets
        a_ = relative_attention_max_distance

        a_ = dropout_rate
        a_ = layer_norm_epsilon
        a_ = initializer_factor
        a_ = feed_forward_proj
        a_ = use_cache
        a_ = add_router_probs

        a_ = router_z_loss_coef
        a_ = router_aux_loss_coef

        # Split e.g. "gated-gelu" into the activation and its gating flag.
        a_ = self.feed_forward_proj.split("""-""" )
        a_ = act_info[-1]
        a_ = act_info[0] == "gated"

        if len(lowercase_ ) > 1 and act_info[0] != "gated" or len(lowercase_ ) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                """Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
                """'gated-gelu' or 'relu'""" )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            a_ = "gelu_new"

        super().__init__(
            pad_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , **lowercase_ , )
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase_ ( unittest.TestCase ):
    def UpperCamelCase ( self ):
        # clean up the VRAM after each test
        # NOTE(review): presumably this was `tearDown` before the automated
        # rename — under its current name unittest will not call it.
        super().tearDown()
        gc.collect()
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
_snake_case : List[Any] = "A painting of a squirrel eating a burger"
_snake_case : Union[str, Any] = jax.device_count()
_snake_case : List[Any] = num_samples * [prompt]
_snake_case : Tuple = sd_pipe.prepare_inputs(lowercase_ )
_snake_case : str = replicate(lowercase_ )
_snake_case : Dict = shard(lowercase_ )
_snake_case : List[Any] = jax.random.PRNGKey(0 )
_snake_case : List[Any] = jax.random.split(lowercase_ , jax.device_count() )
_snake_case : Tuple = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case : str = images[0, 253:256, 253:256, -1]
_snake_case : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case : Optional[Any] = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = "stabilityai/stable-diffusion-2"
_snake_case ,_snake_case : List[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase_ , subfolder="scheduler" )
_snake_case ,_snake_case : int = FlaxStableDiffusionPipeline.from_pretrained(
lowercase_ , scheduler=lowercase_ , revision="bf16" , dtype=jnp.bfloataa , )
_snake_case : str = scheduler_params
_snake_case : Dict = "A painting of a squirrel eating a burger"
_snake_case : Dict = jax.device_count()
_snake_case : Optional[int] = num_samples * [prompt]
_snake_case : List[str] = sd_pipe.prepare_inputs(lowercase_ )
_snake_case : Optional[int] = replicate(lowercase_ )
_snake_case : Union[str, Any] = shard(lowercase_ )
_snake_case : List[Any] = jax.random.PRNGKey(0 )
_snake_case : Union[str, Any] = jax.random.split(lowercase_ , jax.device_count() )
_snake_case : str = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case : List[str] = images[0, 253:256, 253:256, -1]
_snake_case : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case : Dict = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 | 670 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class UpperCAmelCase ( ProcessorMixin ):
    """
    Constructs a TVLT processor which wraps a TVLT image processor and a TVLT feature
    extractor into a single processor.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        """
        Forward `images` (and optionally `images_mixed`) to the image processor and
        `audio` to the feature extractor, merging the results into one dict.
        """
        if images is None and audio is None:
            raise ValueError('You need to specify either an `images` or `audio` input to process.' )
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)
        # Merge whichever modality dicts were produced.
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        # Union of both sub-processors' input names, de-duplicated but order-preserving.
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 683 | from manim import *
class lowercase_ ( __snake_case ):
    """Manim scene illustrating checkpoint shards being loaded into CPU memory slots.

    NOTE(review): many names below (`mem`, `cpu`, `gpu`, `model`, `checkpoint`, `key`,
    `key_text`, `blue_text`, `step_a`, `fill`, `target`, `cpu_target`, `cpu_targs`,
    `first_animations`, `second_animations`, `cpu_left_col_base`, `cpu_right_col_base`)
    are read but every assignment targets `_snake_case`, and many call arguments are the
    placeholder `lowercase_` — this looks like mechanical renaming damage; the original
    variable names and manim direction constants (UP/DOWN/LEFT/RIGHT) must be restored
    before this scene can run.
    """

    def UpperCamelCase ( self ):
        # Base memory-cell rectangles used to build the CPU/GPU/model diagrams.
        _snake_case : Tuple = Rectangle(height=0.5 , width=0.5 )
        _snake_case : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU block: two 6-cell columns plus a label.
        _snake_case : List[str] = [mem.copy() for i in range(6 )]
        _snake_case : Any = [mem.copy() for i in range(6 )]
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : str = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : int = Text("CPU" , font_size=24 )
        _snake_case : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(lowercase_ )
        # GPU block: 4 cells plus a label.
        _snake_case : int = [mem.copy() for i in range(4 )]
        _snake_case : Dict = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : str = Text("GPU" , font_size=24 )
        _snake_case : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        gpu.move_to([-1, -1, 0] )
        self.add(lowercase_ )
        # Model block: 6 cells plus a label.
        _snake_case : Any = [mem.copy() for i in range(6 )]
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Dict = Text("Model" , font_size=24 )
        _snake_case : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        model.move_to([3, -1.0, 0] )
        self.add(lowercase_ )
        # Place small target rectangles next to the CPU cells, one per model cell.
        _snake_case : str = []
        for i, rect in enumerate(lowercase_ ):
            rect.set_stroke(lowercase_ )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            _snake_case : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
            self.add(lowercase_ )
            cpu_targs.append(lowercase_ )
        # "Loaded Checkpoint" block.
        _snake_case : List[Any] = [mem.copy() for i in range(6 )]
        _snake_case : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Optional[Any] = Text("Loaded Checkpoint" , font_size=24 )
        _snake_case : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        # Legend (key) in the top-left corner.
        _snake_case : Optional[int] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        _snake_case : Optional[Any] = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(lowercase_ , lowercase_ )
        _snake_case : Union[str, Any] = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        # Step caption describing what the animation demonstrates.
        _snake_case : List[Any] = MarkupText(
            f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(lowercase_ ) , Write(lowercase_ ) )
        self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
        # Animate each checkpoint cell growing, then moving into its CPU slot.
        _snake_case : int = []
        _snake_case : str = []
        for i, rect in enumerate(lowercase_ ):
            _snake_case : Dict = fill.copy().set_fill(lowercase_ , opacity=0.7 )
            target.move_to(lowercase_ )
            first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
            _snake_case : Dict = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
        self.play(*lowercase_ )
        self.play(*lowercase_ )
        self.wait()
"""simple docstring"""
def factorial(num: int) -> int:
    """Return num! (the product of all integers from 1 to num; 1 for num <= 0)."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of `number` (0 for non-positive input)."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Project Euler problem 20: sum of the digits of num! (default num=100)."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
| 636 | import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class SchedulerType(str, Enum):
    """Names of the supported learning-rate schedules.

    String-valued so schedule names round-trip through configs, and so
    `SchedulerType("linear")` looks a member up by its value.
    """

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer, last_epoch=-1):
    """
    Create a schedule with a constant learning rate, using the learning rate set in the optimizer.

    Args:
        optimizer: The optimizer for which to schedule the learning rate.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    # Multiplier is always 1, i.e. the optimizer's base LR is used unchanged.
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
    """
    Constant learning rate preceded by a linear warmup from 0 over `num_warmup_steps`.

    Args:
        optimizer: The optimizer for which to schedule the learning rate.
        num_warmup_steps: Number of steps over which the LR ramps up linearly.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            # Linear warmup: scale from 0 up to the optimizer's base LR.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer, step_rules, last_epoch=-1):
    """
    Piecewise-constant learning-rate schedule.

    Args:
        optimizer: The optimizer for which to schedule the learning rate.
        step_rules: Rule string such as "1:10,20:0.1,0.01" — multiplier 10 until step 1,
            0.1 until step 20, and 0.01 from then on.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    # All entries but the last are "step:multiplier"; the last is the final multiplier.
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """
    Linear warmup from 0, then linear decay down to 0 at `num_training_steps`.

    Args:
        optimizer: The optimizer for which to schedule the learning rate.
        num_warmup_steps: Number of warmup steps.
        num_training_steps: Total number of training steps.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Linear decay; clamp at 0 once training steps are exhausted.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    """
    Linear warmup, then cosine decay from the base LR down to 0 over the remaining steps.

    Args:
        optimizer: The optimizer for which to schedule the learning rate.
        num_warmup_steps: Number of warmup steps.
        num_training_steps: Total number of training steps.
        num_cycles: Number of cosine cycles; the default 0.5 decays from max to 0.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1):
    """
    Linear warmup, then cosine decay with `num_cycles` hard restarts back to the base LR.

    Args:
        optimizer: The optimizer for which to schedule the learning rate.
        num_warmup_steps: Number of warmup steps.
        num_training_steps: Total number of training steps.
        num_cycles: Number of hard restarts.
        last_epoch: The index of the last epoch when resuming training.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # The modulo restarts each cosine cycle at the base LR.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """
    Linear warmup, then polynomial decay from the optimizer's base LR down to `lr_end`.

    Args:
        optimizer: The optimizer for which to schedule the learning rate.
        num_warmup_steps: Number of warmup steps.
        num_training_steps: Total number of training steps.
        lr_end: Final learning rate reached at `num_training_steps`.
        power: Exponent of the polynomial decay (1.0 is linear).
        last_epoch: The index of the last epoch when resuming training.

    Raises:
        ValueError: If `lr_end` is not smaller than the optimizer's initial LR.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""")

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Map each SchedulerType member to the factory that builds its LambdaLR schedule.
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name, optimizer, step_rules=None, num_warmup_steps=None, num_training_steps=None, num_cycles=1, power=1.0, last_epoch=-1):
    """
    Unified API to build any supported schedule from its name.

    Args:
        name: A `SchedulerType` member or its string value.
        optimizer: The optimizer for which to schedule the learning rate.
        step_rules: Rule string, only used for PIECEWISE_CONSTANT.
        num_warmup_steps: Required by every schedule except CONSTANT and PIECEWISE_CONSTANT.
        num_training_steps: Required by the decaying schedules.
        num_cycles: Only used for COSINE_WITH_RESTARTS.
        power: Only used for POLYNOMIAL.
        last_epoch: The index of the last epoch when resuming training.

    Raises:
        ValueError: If a required argument for the chosen schedule is missing.

    Returns:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    """Infer the data format from a file extension; an empty/None path means stdin ("pipe").

    Raises:
        Exception: If the extension matches no supported `PipelineDataFormat`.
    """
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" )
def run_command_factory(args):
    """Build a `RunCommand` from parsed CLI args: instantiate the pipeline and the data reader."""
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    # "infer" means: derive the format from the input file's extension.
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    """`transformers-cli run`: feed every entry of a data reader through a pipeline and save the outputs."""

    def __init__(self, nlp, reader):
        # nlp: an instantiated Pipeline; reader: a PipelineDataFormat yielding the inputs.
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser):
        """Attach the `run` sub-parser and its options to the root CLI parser."""
        run_parser = parser.add_parser('run' , help='Run a pipeline through the CLI')
        run_parser.add_argument('--task' , choices=get_supported_tasks() , help='Task to run')
        run_parser.add_argument('--input' , type=str , help='Path to the file to use for inference')
        run_parser.add_argument('--output' , type=str , help='Path to the file that will be used post to write results.')
        run_parser.add_argument('--model' , type=str , help='Name or path to the model to instantiate.')
        run_parser.add_argument('--config' , type=str , help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            '--tokenizer' , type=str , help='Name of the tokenizer to use. (default: same as the model name)')
        run_parser.add_argument(
            '--column' , type=str , help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)' , )
        run_parser.add_argument(
            '--format' , type=str , default='infer' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='Input format to read from' , )
        run_parser.add_argument(
            '--device' , type=int , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , )
        run_parser.add_argument('--overwrite' , action='store_true' , help='Allow overwriting the output file.')
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        """Run the pipeline over every entry produced by the reader and persist the results."""
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            # Multi-column readers yield dicts of kwargs; single-column readers yield the raw value.
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
| 462 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class lowercase_ ( PretrainedConfig ):
    """Configuration class for the RoCBert model.

    Stores the standard BERT hyper-parameters plus RoCBert's extra pronunciation/shape
    embedding options; defaults reproduce `weiweishi/roc-bert-base-zh`.
    """

    model_type = 'roc_bert'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: pronunciation/shape auxiliary embeddings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : Tuple = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """Build the (old_name, new_name) pairs mapping original ViLT checkpoint keys to HF names.

    Args:
        config: Model config; only `num_hidden_layers` is read here.
        vqa_model / nlvr_model / irtr_model: Select which task head's keys to include
            (irtr has no extra head keys; it is accepted for a uniform call signature).
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'transformer.blocks.{i}.norm1.weight', f'vilt.encoder.layer.{i}.layernorm_before.weight'))
        rename_keys.append((f'transformer.blocks.{i}.norm1.bias', f'vilt.encoder.layer.{i}.layernorm_before.bias'))
        rename_keys.append(
            (f'transformer.blocks.{i}.attn.proj.weight', f'vilt.encoder.layer.{i}.attention.output.dense.weight'))
        rename_keys.append(
            (f'transformer.blocks.{i}.attn.proj.bias', f'vilt.encoder.layer.{i}.attention.output.dense.bias'))
        rename_keys.append((f'transformer.blocks.{i}.norm2.weight', f'vilt.encoder.layer.{i}.layernorm_after.weight'))
        rename_keys.append((f'transformer.blocks.{i}.norm2.bias', f'vilt.encoder.layer.{i}.layernorm_after.bias'))
        rename_keys.append(
            (f'transformer.blocks.{i}.mlp.fc1.weight', f'vilt.encoder.layer.{i}.intermediate.dense.weight'))
        rename_keys.append((f'transformer.blocks.{i}.mlp.fc1.bias', f'vilt.encoder.layer.{i}.intermediate.dense.bias'))
        rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.weight', f'vilt.encoder.layer.{i}.output.dense.weight'))
        rename_keys.append((f'transformer.blocks.{i}.mlp.fc2.bias', f'vilt.encoder.layer.{i}.output.dense.bias'))

    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ] )
    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ] )
    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ] )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ] )
    else:
        pass
    return rename_keys
def read_in_q_k_v(state_dict, config):
    """Split each fused qkv projection into separate query/key/value entries, in place.

    Pops the timm-style `transformer.blocks.{i}.attn.qkv.{weight,bias}` tensors and
    inserts the per-projection slices under the HF ViLT naming scheme.
    NOTE(review): target key names `vilt.encoder.layer.{i}.attention.attention.*`
    reconstructed from the HF ViLT layout — confirm against the ViLT model definition.
    """
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'transformer.blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the generic classification head weights from the checkpoint, in place.

    Missing keys are ignored (`pop` with a default), so this is safe on headless checkpoints.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]`, in place. Raises KeyError if `old` is absent."""
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Download an original ViLT checkpoint, rename/split its weights to the HF ViLT layout,
    sanity-check the converted model on an example input, and save model + processor.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    vqa_model = False
    nlvr_model = False
    mlm_model = False
    irtr_model = False
    # The task head is inferred from the checkpoint URL.
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3_129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        # The tied decoder bias is recreated on load, so it is the only expected missing key.
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)
    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        # NOTE(review): keyword name `pixel_values_2` reconstructed from the HF NLVR2 model
        # signature — confirm against ViltForImagesAndTextClassification.forward.
        outputs = model(
            input_ids=encoding_1.input_ids, pixel_values=encoding_1.pixel_values, pixel_values_2=encoding_2.pixel_values, )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30_522])
        expected_slice = torch.tensor([-12.5_061, -12.5_123, -12.5_174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3_129])
        expected_slice = torch.tensor([-15.9_495, -18.1_472, -10.3_041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8_721, 2.1_291])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model and processor to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint URL and output folder, then run the conversion.
    # NOTE(review): the parser is bound to `a` but configured/consumed via `parser`, and the
    # parsed namespace is bound to `a` again but read as `args` — these names look mechanically
    # mangled and need to be reconciled before this script can run.
    a : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt""",
        type=str,
        help="""URL of the checkpoint you\'d like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    a : Union[str, Any] = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 218 | from cva import destroyAllWindows, imread, imshow, waitKey
def snake_case (__lowercase ) -> Tuple:
'''simple docstring'''
_snake_case ,_snake_case : int = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(__lowercase ):
for j in range(__lowercase ):
_snake_case : Optional[Any] = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
__SCREAMING_SNAKE_CASE : Optional[Any] = imread('image_data/lena.jpg', 1)
# convert to its negative
__SCREAMING_SNAKE_CASE : Tuple = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows() | 670 | 0 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : Union[str, Any] ) -> str:
    # Helper: sanity-check a Dataset read from parquet -- expects 4 rows and the
    # columns col_1/col_2/col_3 with the dtypes given in the mapping.
    # NOTE(review): both parameters are named ``__A`` (a SyntaxError) while the
    # body reads ``dataset``/``expected_features``/``__lowercase`` -- identifier
    # mangling; restore (dataset, expected_features) before running.
    assert isinstance(__lowercase , __lowercase )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : str , __A : Tuple ) -> Dict:
    # Test: ParquetDatasetReader honors keep_in_memory -- arrow memory should
    # grow only when the table is materialized in memory.
    # NOTE(review): duplicate ``__A`` parameters and undefined ``__lowercase``
    # references are mangling artifacts; presumably the fixtures were
    # (parquet_path, tmp_path, keep_in_memory) -- confirm against upstream.
    _SCREAMING_SNAKE_CASE = tmp_path / "cache"
    _SCREAMING_SNAKE_CASE = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        _SCREAMING_SNAKE_CASE = ParquetDatasetReader(__lowercase , cache_dir=__lowercase , keep_in_memory=__lowercase ).read()
    _check_parquet_dataset(__lowercase , __lowercase )
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def SCREAMING_SNAKE_CASE_ ( __A : List[Any] , __A : int , __A : Optional[int] ) -> Optional[int]:
    # Test: explicit ``features`` schemas are applied when reading a Dataset
    # from parquet; None falls back to the default dtypes.
    # NOTE(review): duplicate ``__A`` parameters and undefined ``__lowercase``
    # are mangling artifacts.
    _SCREAMING_SNAKE_CASE = tmp_path / "cache"
    _SCREAMING_SNAKE_CASE = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    _SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
    _SCREAMING_SNAKE_CASE = (
        Features({feature: Value(__lowercase ) for feature, dtype in features.items()} ) if features is not None else None
    )
    _SCREAMING_SNAKE_CASE = ParquetDatasetReader(__lowercase , features=__lowercase , cache_dir=__lowercase ).read()
    _check_parquet_dataset(__lowercase , __lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : str , __A : List[Any] ) -> Optional[Any]:
    # Test: the ``split`` argument is forwarded to the reader (default "train").
    # NOTE(review): mangled identifiers as in the other tests. Also note the
    # final assert parses as ``(dataset.split == split) if split else "train"``,
    # i.e. it is vacuously true when ``split`` is falsy.
    _SCREAMING_SNAKE_CASE = tmp_path / "cache"
    _SCREAMING_SNAKE_CASE = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    _SCREAMING_SNAKE_CASE = ParquetDatasetReader(__lowercase , cache_dir=__lowercase , split=__lowercase ).read()
    _check_parquet_dataset(__lowercase , __lowercase )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def SCREAMING_SNAKE_CASE_ ( __A : str , __A : Optional[int] , __A : Tuple ) -> str:
    # Test: the reader accepts a single path (str) or a list of paths.
    # NOTE(review): mangled identifiers (duplicate ``__A``, undefined ``__lowercase``).
    if issubclass(__lowercase , __lowercase ):
        _SCREAMING_SNAKE_CASE = parquet_path
    elif issubclass(__lowercase , __lowercase ):
        _SCREAMING_SNAKE_CASE = [parquet_path]
    _SCREAMING_SNAKE_CASE = tmp_path / "cache"
    _SCREAMING_SNAKE_CASE = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    _SCREAMING_SNAKE_CASE = ParquetDatasetReader(__lowercase , cache_dir=__lowercase ).read()
    _check_parquet_dataset(__lowercase , __lowercase )
def SCREAMING_SNAKE_CASE_ ( __A : List[Any] , __A : List[Any] , __A : str=("train",) ) -> List[str]:
    # Helper: sanity-check a DatasetDict -- each requested split must have 4
    # rows and the columns col_1/col_2/col_3 with the expected dtypes.
    # NOTE(review): mangled identifiers (duplicate ``__A``, undefined
    # ``__lowercase``); intended names look like (dataset_dict,
    # expected_features, splits) -- confirm against upstream.
    assert isinstance(__lowercase , __lowercase )
    for split in splits:
        _SCREAMING_SNAKE_CASE = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def SCREAMING_SNAKE_CASE_ ( __A : Tuple , __A : List[Any] , __A : str ) -> Union[str, Any]:
    # Test: DatasetDict variant of the keep_in_memory test above.
    # NOTE(review): mangled identifiers (duplicate ``__A``, undefined ``__lowercase``).
    _SCREAMING_SNAKE_CASE = tmp_path / "cache"
    _SCREAMING_SNAKE_CASE = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        _SCREAMING_SNAKE_CASE = ParquetDatasetReader(
            {"train": parquet_path} , cache_dir=__lowercase , keep_in_memory=__lowercase ).read()
    _check_parquet_datasetdict(__lowercase , __lowercase )
@pytest.mark.parametrize(
    "features" , [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] , )
def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : Any , __A : Optional[int] ) -> List[str]:
    # Test: DatasetDict variant of the explicit-features test above.
    # NOTE(review): mangled identifiers (duplicate ``__A``, undefined ``__lowercase``).
    _SCREAMING_SNAKE_CASE = tmp_path / "cache"
    _SCREAMING_SNAKE_CASE = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    _SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
    _SCREAMING_SNAKE_CASE = (
        Features({feature: Value(__lowercase ) for feature, dtype in features.items()} ) if features is not None else None
    )
    _SCREAMING_SNAKE_CASE = ParquetDatasetReader({"train": parquet_path} , features=__lowercase , cache_dir=__lowercase ).read()
    _check_parquet_datasetdict(__lowercase , __lowercase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def SCREAMING_SNAKE_CASE_ ( __A : Union[str, Any] , __A : Optional[int] , __A : List[str] ) -> Dict:
    # Test: reading a DatasetDict with an explicit split mapping vs. the
    # default {"train", "test"} mapping; each split must be tagged correctly.
    # NOTE(review): mangled identifiers (duplicate ``__A``, undefined ``__lowercase``).
    if split:
        _SCREAMING_SNAKE_CASE = {split: parquet_path}
    else:
        _SCREAMING_SNAKE_CASE = "train"
        _SCREAMING_SNAKE_CASE = {"train": parquet_path, "test": parquet_path}
    _SCREAMING_SNAKE_CASE = tmp_path / "cache"
    _SCREAMING_SNAKE_CASE = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    _SCREAMING_SNAKE_CASE = ParquetDatasetReader(__lowercase , cache_dir=__lowercase ).read()
    _check_parquet_datasetdict(__lowercase , __lowercase , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def SCREAMING_SNAKE_CASE_ ( __A : Any , __A : Any ) -> Optional[int]:
    # Test: ParquetDatasetWriter writes a non-empty file whose arrow table
    # round-trips equal to the source dataset's table.
    # NOTE(review): mangled identifiers (duplicate ``__A``, undefined ``__lowercase``).
    _SCREAMING_SNAKE_CASE = ParquetDatasetWriter(__lowercase , tmp_path / "foo.parquet" )
    assert writer.write() > 0
    _SCREAMING_SNAKE_CASE = pq.ParquetFile(tmp_path / "foo.parquet" )
    _SCREAMING_SNAKE_CASE = pf.read()
    assert dataset.data.table == output_table
def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : Union[str, Any] ) -> Union[str, Any]:
    # Test: an Image feature survives a parquet write + re-read, both for the
    # eager Dataset and the streaming IterableDataset paths.
    # NOTE(review): mangled identifiers (duplicate ``__A``, undefined ``__lowercase``).
    _SCREAMING_SNAKE_CASE = str(shared_datadir / "test_image_rgb.jpg" )
    _SCREAMING_SNAKE_CASE = {"image": [image_path]}
    _SCREAMING_SNAKE_CASE = Features({"image": Image()} )
    _SCREAMING_SNAKE_CASE = Dataset.from_dict(__lowercase , features=__lowercase )
    _SCREAMING_SNAKE_CASE = ParquetDatasetWriter(__lowercase , tmp_path / "foo.parquet" )
    assert writer.write() > 0
    _SCREAMING_SNAKE_CASE = Dataset.from_parquet(str(tmp_path / "foo.parquet" ) )
    assert dataset.features == reloaded_dataset.features
    _SCREAMING_SNAKE_CASE = ParquetDatasetReader(str(tmp_path / "foo.parquet" ) , streaming=__lowercase ).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected" , [
        (Features({"foo": Value("int32" )} ), None),
        (Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ] , )
def SCREAMING_SNAKE_CASE_ ( __A : Optional[int] , __A : Dict ) -> Any:
    # Test: get_writer_batch_size returns the smaller row-group sizes for
    # image/audio features and None for plain features.
    # NOTE(review): mangled identifiers (duplicate ``__A``, undefined ``__lowercase``).
    assert get_writer_batch_size(__lowercase ) == expected
| 418 | import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
# Type aliases for protein feature dictionaries, plus the picometer->angstrom
# scale used when parsing TERTIARY coordinates (referenced below as
# ``PICO_TO_ANGSTROM``).
# NOTE(review): all three assignments bind the same mangled name, so the later
# ones overwrite the earlier ones -- restore three distinct names.
__SCREAMING_SNAKE_CASE : List[str] = Mapping[str, np.ndarray]
__SCREAMING_SNAKE_CASE : List[Any] = Mapping[str, Any] # Is a nested dict.
__SCREAMING_SNAKE_CASE : List[Any] = 0.01
@dataclasses.dataclass(frozen=__snake_case )
class lowercase_ :
    """Protein structure representation.

    NOTE(review): field names here are machine-mangled -- every field is bound
    to the same name ``_lowerCamelCase`` (later bindings shadow earlier ones)
    and the decorator references an undefined ``__snake_case``. The inline
    comments preserved below describe the intended fields
    (atom_positions, aatype, atom_mask, residue_index, b_factors, ...).
    """

    _lowerCamelCase = 42 # [num_res, num_atom_type, 3]
    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    _lowerCamelCase = 42 # [num_res]
    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    _lowerCamelCase = 42 # [num_res, num_atom_type]
    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    _lowerCamelCase = 42 # [num_res]
    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    _lowerCamelCase = 42 # [num_res, num_atom_type]
    # Chain indices for multi-chain predictions
    _lowerCamelCase = None
    # Optional remark about the protein. Included as a comment in output PDB
    # files
    _lowerCamelCase = None
    # Templates used to generate this protein (prediction-only)
    _lowerCamelCase = None
    # Chain corresponding to each parent
    _lowerCamelCase = None
def snake_case (__lowercase ) -> Protein:
    '''Parse a ProteinNet-style string into a ``Protein``.

    The input is split into ``[PRIMARY]`` (sequence), ``[TERTIARY]``
    (coordinates, picometers) and ``[MASK]`` (+/- per residue) sections.

    NOTE(review): identifiers are machine-mangled -- locals all rebind
    ``_snake_case`` and ``__lowercase`` is undefined -- so the function cannot
    run as written; the control flow appears to follow the upstream
    ProteinNet parser (seq -> aatype, tertiary -> atom_positions,
    mask -> atom_mask). Confirm names against the original before use.
    '''
    _snake_case : str = r"(\[[A-Z]+\]\n)"
    _snake_case : List[str] = [tag.strip() for tag in re.split(__lowercase , __lowercase ) if len(__lowercase ) > 0]
    # Pair each section tag with the lines of its body.
    _snake_case : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("\n" ) for l in tags[1::2]] )
    _snake_case : List[str] = ["N", "CA", "C"]
    _snake_case : Any = None
    _snake_case : Union[str, Any] = None
    _snake_case : Optional[int] = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            _snake_case : Tuple = g[1][0].strip()
            for i in range(len(__lowercase ) ):
                if seq[i] not in residue_constants.restypes:
                    _snake_case : Tuple = "X" # FIXME: strings are immutable
            _snake_case : int = np.array(
                [residue_constants.restype_order.get(__lowercase , residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            _snake_case : List[List[float]] = []
            for axis in range(3 ):
                tertiary.append(list(map(__lowercase , g[1][axis].split() ) ) )
            _snake_case : Dict = np.array(__lowercase )
            _snake_case : Dict = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
            for i, atom in enumerate(__lowercase ):
                _snake_case : List[Any] = np.transpose(tertiary_np[:, i::3] )
            # Coordinates arrive in picometers; convert to angstroms.
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            _snake_case : int = np.array(list(map({"-": 0, "+": 1}.get , g[1][0].strip() ) ) )
            _snake_case : Any = np.zeros(
                (
                    len(__lowercase ),
                    residue_constants.atom_type_num,
                ) ).astype(np.floataa )
            for i, atom in enumerate(__lowercase ):
                _snake_case : Dict = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=__lowercase , atom_mask=__lowercase , aatype=__lowercase , residue_index=np.arange(len(__lowercase ) ) , b_factors=__lowercase , )
def snake_case (__lowercase , __lowercase = 0 ) -> List[str]:
    '''Build the REMARK / PARENT PDB header lines for a protein and chain id.

    NOTE(review): parameter names are mangled (both ``__lowercase``, a
    SyntaxError) while the body reads ``prot`` and ``chain_id``; locals all
    rebind ``_snake_case``. Restore real names before use.
    '''
    _snake_case : List[str] = []
    _snake_case : Optional[Any] = prot.remark
    if remark is not None:
        pdb_headers.append(F"""REMARK {remark}""" )
    _snake_case : str = prot.parents
    _snake_case : str = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        # Keep only the parents belonging to the requested chain.
        _snake_case : int = [p for i, p in zip(__lowercase , __lowercase ) if i == chain_id]
    if parents is None or len(__lowercase ) == 0:
        _snake_case : Optional[int] = ["N/A"]
    pdb_headers.append(F"""PARENT {' '.join(__lowercase )}""" )
    return pdb_headers
def snake_case (__lowercase , __lowercase ) -> str:
    '''Insert per-chain REMARK / PARENT header lines into an existing PDB string.

    NOTE(review): parameter names are mangled (both ``__lowercase``) while the
    body reads ``prot`` and ``pdb_str``; locals all rebind ``_snake_case``.
    '''
    _snake_case : List[str] = []
    _snake_case : Optional[int] = pdb_str.split("\n" )
    _snake_case : List[str] = prot.remark
    if remark is not None:
        out_pdb_lines.append(F"""REMARK {remark}""" )
    _snake_case : List[List[str]]
    if prot.parents is not None and len(prot.parents ) > 0:
        _snake_case : str = []
        if prot.parents_chain_index is not None:
            # Group parents by their chain index, then emit one list per chain
            # index up to the maximum, defaulting missing chains to ["N/A"].
            _snake_case : Dict[str, List[str]] = {}
            for p, i in zip(prot.parents , prot.parents_chain_index ):
                parent_dict.setdefault(str(__lowercase ) , [] )
                parent_dict[str(__lowercase )].append(__lowercase )
            _snake_case : Any = max([int(__lowercase ) for chain_idx in parent_dict] )
            for i in range(max_idx + 1 ):
                _snake_case : Tuple = parent_dict.get(str(__lowercase ) , ["N/A"] )
                parents_per_chain.append(__lowercase )
        else:
            parents_per_chain.append(list(prot.parents ) )
    else:
        _snake_case : List[str] = [["N/A"]]

    def make_parent_line(__lowercase ) -> str:
        # Format a PARENT record from a list of parent identifiers.
        return F"""PARENT {' '.join(__lowercase )}"""

    out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
    _snake_case : int = 0
    for i, l in enumerate(__lowercase ):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(__lowercase )
        if "TER" in l and "END" not in lines[i + 1]:
            # A chain just ended; emit the PARENT record for the next chain.
            chain_counter += 1
            if not chain_counter >= len(__lowercase ):
                _snake_case : Tuple = parents_per_chain[chain_counter]
            else:
                _snake_case : str = ["N/A"]
            out_pdb_lines.append(make_parent_line(__lowercase ) )
    return "\n".join(__lowercase )
def snake_case (__lowercase ) -> str:
    '''Convert a ``Protein`` to a PDB-format string (ATOM/TER/END records).

    NOTE(review): identifiers are machine-mangled -- the parameter is
    ``__lowercase`` while the body reads ``prot``, and every local rebinds
    ``_snake_case`` so later reads (record_type, name, occupancy, ...) are
    undefined as written. The layout matches a columnar PDB writer; restore
    the real local names before running.
    '''
    _snake_case : Optional[Any] = residue_constants.restypes + ["X"]

    def res_atoa(__lowercase ) -> str:
        # Map a residue-type index to its 3-letter PDB name ("UNK" fallback).
        return residue_constants.restype_atoa.get(restypes[r] , "UNK" )

    _snake_case : Optional[int] = residue_constants.atom_types
    _snake_case : List[str] = []
    _snake_case : Tuple = prot.atom_mask
    _snake_case : List[str] = prot.aatype
    _snake_case : int = prot.atom_positions
    _snake_case : int = prot.residue_index.astype(np.intaa )
    _snake_case : List[Any] = prot.b_factors
    _snake_case : str = prot.chain_index
    if np.any(aatype > residue_constants.restype_num ):
        raise ValueError("Invalid aatypes." )
    _snake_case : Union[str, Any] = get_pdb_headers(__lowercase )
    if len(__lowercase ) > 0:
        pdb_lines.extend(__lowercase )
    _snake_case : Optional[Any] = aatype.shape[0]
    _snake_case : str = 1
    _snake_case : Tuple = 0
    _snake_case : int = string.ascii_uppercase
    _snake_case : Optional[Any] = None
    # Add all atom sites.
    for i in range(__lowercase ):
        _snake_case : Dict = res_atoa(aatype[i] )
        for atom_name, pos, mask, b_factor in zip(__lowercase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
            if mask < 0.5:
                continue
            _snake_case : List[Any] = "ATOM"
            _snake_case : Union[str, Any] = atom_name if len(__lowercase ) == 4 else F""" {atom_name}"""
            _snake_case : str = ""
            _snake_case : str = ""
            _snake_case : Any = 1.00
            _snake_case : str = atom_name[0] # Protein supports only C, N, O, S, this works.
            _snake_case : Dict = ""
            _snake_case : Any = "A"
            if chain_index is not None:
                _snake_case : List[Any] = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            _snake_case : Optional[int] = (
                F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
                F"""{res_name_a:>3} {chain_tag:>1}"""
                F"""{residue_index[i]:>4}{insertion_code:>1}   """
                F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
                F"""{occupancy:>6.2f}{b_factor:>6.2f}          """
                F"""{element:>2}{charge:>2}"""
            )
            pdb_lines.append(__lowercase )
            atom_index += 1
        _snake_case : Dict = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                _snake_case : Optional[int] = True
                _snake_case : Union[str, Any] = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            _snake_case : List[str] = "TER"
            _snake_case : str = (
                F"""{chain_end:<6}{atom_index:>5}      {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
            )
            pdb_lines.append(__lowercase )
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(__lowercase , __lowercase ) )
    pdb_lines.append("END" )
    pdb_lines.append("" )
    return "\n".join(__lowercase )
def snake_case (__lowercase ) -> np.ndarray:
    '''Return the standard per-residue atom mask for the protein's aatype.

    NOTE(review): the parameter is named ``__lowercase`` but the body reads
    ``prot`` -- identifier mangling; rename the parameter to ``prot``.
    '''
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def snake_case (features , result , b_factors = None , chain_index = None , remark = None , parents = None , parents_chain_index = None , ) -> "Protein":
    '''Assemble a ``Protein`` from model input *features* and model *result*.

    ``residue_index`` is shifted to 1-based indexing; a missing *b_factors*
    defaults to zeros shaped like ``result["final_atom_mask"]``. The remaining
    arguments are stored on the ``Protein`` unchanged.

    Bug fix: the original signature repeated the parameter name ``__lowercase``
    seven times (a SyntaxError) while the body read ``features`` / ``result`` /
    ``b_factors``, and the final line carried stray dataset-separator tokens;
    the parameters are restored to the names the body actually uses.
    '''
    if b_factors is None:
        b_factors = np.zeros_like(result["final_atom_mask"] )
    return Protein(
        aatype=features["aatype"] ,
        atom_positions=result["final_atom_positions"] ,
        atom_mask=result["final_atom_mask"] ,
        residue_index=features["residue_index"] + 1 ,
        b_factors=b_factors ,
        chain_index=chain_index ,
        remark=remark ,
        parents=parents ,
        parents_chain_index=parents_chain_index , )
import re
def __a ( lowerCAmelCase_ : str ) -> bool:
    """Return True if *lowerCAmelCase_* is a valid Indian mobile number.

    Accepts an optional ``+91`` / ``91`` / leading-zero prefix followed by a
    10-digit number starting with 7, 8 or 9; the whole string must match.

    Bug fix: the original searched with the undefined name ``__lowercase`` and
    compared ``match.string`` against an undefined ``phone``; the compiled
    pattern and the actual argument are used instead.
    """
    UpperCAmelCase_= re.compile(r"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""" )
    if match := re.search(UpperCAmelCase_ , lowerCAmelCase_ ):
        # ``match.string`` is the full input, so equality means a full match.
        return match.string == lowerCAmelCase_
    return False
if __name__ == "__main__":
    # Demo: validate a sample Indian phone number.
    # NOTE(review): ``indian_phone_validator`` is not defined in this file (the
    # validator above was renamed ``__a`` by identifier mangling), so this call
    # raises NameError as written -- confirm the intended function name.
    print(indian_phone_validator('''+918827897895'''))
| 593 | from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['image_processor']
_lowerCamelCase = 'SamImageProcessor'
def __init__( self , lowercase_ ):
super().__init__(lowercase_ )
_snake_case : Optional[Any] = self.image_processor
_snake_case : Tuple = -10
_snake_case : str = self.image_processor.size["longest_edge"]
def __call__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_ = None , **lowercase_ , ):
_snake_case : List[Any] = self.image_processor(
lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# pop arguments that are not used in the foward but used nevertheless
_snake_case : Any = encoding_image_processor["original_sizes"]
if hasattr(lowercase_ , "numpy" ): # Checks if Torch or TF tensor
_snake_case : int = original_sizes.numpy()
_snake_case ,_snake_case ,_snake_case : Union[str, Any] = self._check_and_preprocess_points(
input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , )
_snake_case : Dict = self._normalize_and_convert(
lowercase_ , lowercase_ , input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , return_tensors=lowercase_ , )
return encoding_image_processor
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="pt" , ):
if input_points is not None:
if len(lowercase_ ) != len(lowercase_ ):
_snake_case : int = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] ) for point in input_points
]
else:
_snake_case : Dict = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ )
for point, original_size in zip(lowercase_ , lowercase_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
_snake_case ,_snake_case : int = self._pad_points_and_labels(lowercase_ , lowercase_ )
_snake_case : Any = np.array(lowercase_ )
if input_labels is not None:
_snake_case : Optional[Any] = np.array(lowercase_ )
if input_boxes is not None:
if len(lowercase_ ) != len(lowercase_ ):
_snake_case : Optional[Any] = [
self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] , is_bounding_box=lowercase_ )
for box in input_boxes
]
else:
_snake_case : List[str] = [
self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ , is_bounding_box=lowercase_ )
for box, original_size in zip(lowercase_ , lowercase_ )
]
_snake_case : Tuple = np.array(lowercase_ )
if input_boxes is not None:
if return_tensors == "pt":
_snake_case : List[str] = torch.from_numpy(lowercase_ )
# boxes batch size of 1 by default
_snake_case : Optional[Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
_snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
# boxes batch size of 1 by default
_snake_case : Optional[int] = tf.expand_dims(lowercase_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
_snake_case : Tuple = torch.from_numpy(lowercase_ )
# point batch size of 1 by default
_snake_case : int = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
_snake_case : List[str] = tf.convert_to_tensor(lowercase_ )
# point batch size of 1 by default
_snake_case : Tuple = tf.expand_dims(lowercase_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
_snake_case : Dict = torch.from_numpy(lowercase_ )
# point batch size of 1 by default
_snake_case : str = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
_snake_case : Optional[Any] = tf.convert_to_tensor(lowercase_ )
# point batch size of 1 by default
_snake_case : List[Any] = tf.expand_dims(lowercase_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def UpperCamelCase ( self , lowercase_ , lowercase_ ):
_snake_case : List[Any] = max([point.shape[0] for point in input_points] )
_snake_case : List[str] = []
for i, point in enumerate(lowercase_ ):
if point.shape[0] != expected_nb_points:
_snake_case : Optional[Any] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
_snake_case : Union[str, Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(lowercase_ )
_snake_case : Optional[Any] = processed_input_points
return input_points, input_labels
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=False ):
_snake_case ,_snake_case : Optional[int] = original_size
_snake_case ,_snake_case : List[str] = self.image_processor._get_preprocess_shape(lowercase_ , longest_edge=lowercase_ )
_snake_case : Optional[Any] = deepcopy(lowercase_ ).astype(lowercase_ )
if is_bounding_box:
_snake_case : str = coords.reshape(-1 , 2 , 2 )
_snake_case : Optional[Any] = coords[..., 0] * (new_w / old_w)
_snake_case : Dict = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
_snake_case : Optional[Any] = coords.reshape(-1 , 4 )
return coords
def UpperCamelCase ( self , lowercase_=None , lowercase_=None , lowercase_=None , ):
if input_points is not None:
if hasattr(lowercase_ , "numpy" ): # Checks for TF or Torch tensor
_snake_case : Union[str, Any] = input_points.numpy().tolist()
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_points[0] , lowercase_ ):
raise ValueError("Input points must be a list of list of floating points." )
_snake_case : Any = [np.array(lowercase_ ) for input_point in input_points]
else:
_snake_case : Optional[int] = None
if input_labels is not None:
if hasattr(lowercase_ , "numpy" ):
_snake_case : Tuple = input_labels.numpy().tolist()
if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_labels[0] , lowercase_ ):
raise ValueError("Input labels must be a list of list integers." )
_snake_case : Tuple = [np.array(lowercase_ ) for label in input_labels]
else:
_snake_case : Optional[Any] = None
if input_boxes is not None:
if hasattr(lowercase_ , "numpy" ):
_snake_case : List[str] = input_boxes.numpy().tolist()
if (
not isinstance(lowercase_ , lowercase_ )
or not isinstance(input_boxes[0] , lowercase_ )
or not isinstance(input_boxes[0][0] , lowercase_ )
):
raise ValueError("Input boxes must be a list of list of list of floating points." )
_snake_case : List[Any] = [np.array(lowercase_ ).astype(np.floataa ) for box in input_boxes]
else:
_snake_case : Optional[int] = None
return input_points, input_labels, input_boxes
@property
def UpperCamelCase ( self ):
_snake_case : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(lowercase_ ) )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.image_processor.post_process_masks(*lowercase_ , **lowercase_ ) | 670 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class snake_case ( __snake_case ):
    '''Processor that wraps a SAM image processor and normalizes user-supplied
    prompt points / labels / boxes to the processor's target size.

    NOTE(review): this block is identifier-mangled -- the base class
    ``__snake_case`` is undefined, ``__call__`` and the helpers declare the
    same parameter name ``lowerCAmelCase_`` repeatedly (a SyntaxError), and
    locals all rebind ``SCREAMING_SNAKE_CASE_``. Restore real names against
    the upstream SamProcessor before running.
    '''
    # NOTE(review): both class attributes bind the same mangled name; the
    # second assignment shadows the first.
    UpperCAmelCase : str = ["""image_processor"""]
    UpperCAmelCase : Tuple = """SamImageProcessor"""

    def __init__( self : int , lowerCAmelCase_ : Optional[int] ) -> Optional[int]:
        """Cache the wrapped image processor, the pad value for ragged point
        sets, and the resize target (longest edge)."""
        super().__init__(lowercase_ )
        SCREAMING_SNAKE_CASE_ = self.image_processor
        SCREAMING_SNAKE_CASE_ = -10
        SCREAMING_SNAKE_CASE_ = self.image_processor.size["longest_edge"]

    def __call__( self : List[str] , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Any = None , **lowerCAmelCase_ : Optional[Any] , ) -> str:
        """Run the image processor, then validate and normalize any prompt
        points / labels / boxes into the returned encoding."""
        SCREAMING_SNAKE_CASE_ = self.image_processor(
            lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
        # pop arguments that are not used in the foward but used nevertheless
        SCREAMING_SNAKE_CASE_ = encoding_image_processor["original_sizes"]
        if hasattr(lowercase_ , '''numpy''' ): # Checks if Torch or TF tensor
            SCREAMING_SNAKE_CASE_ = original_sizes.numpy()
        SCREAMING_SNAKE_CASE_ = self._check_and_preprocess_points(
            input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , )
        SCREAMING_SNAKE_CASE_ = self._normalize_and_convert(
            lowercase_ , lowercase_ , input_points=lowercase_ , input_labels=lowercase_ , input_boxes=lowercase_ , return_tensors=lowercase_ , )
        return encoding_image_processor

    def _lowercase ( self : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Tuple="pt" , ) -> Dict:
        """Rescale prompts to the resized image and convert them to the
        requested tensor framework ("pt" or "tf"), padding ragged point sets."""
        if input_points is not None:
            if len(lowercase_ ) != len(lowercase_ ):
                SCREAMING_SNAKE_CASE_ = [
                    self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] ) for point in input_points
                ]
            else:
                SCREAMING_SNAKE_CASE_ = [
                    self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ )
                    for point, original_size in zip(lowercase_ , lowercase_ )
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points ):
                if input_labels is not None:
                    SCREAMING_SNAKE_CASE_ = self._pad_points_and_labels(lowercase_ , lowercase_ )
            SCREAMING_SNAKE_CASE_ = np.array(lowercase_ )
        if input_labels is not None:
            SCREAMING_SNAKE_CASE_ = np.array(lowercase_ )
        if input_boxes is not None:
            if len(lowercase_ ) != len(lowercase_ ):
                SCREAMING_SNAKE_CASE_ = [
                    self._normalize_coordinates(self.target_size , lowercase_ , original_sizes[0] , is_bounding_box=lowercase_ )
                    for box in input_boxes
                ]
            else:
                SCREAMING_SNAKE_CASE_ = [
                    self._normalize_coordinates(self.target_size , lowercase_ , lowercase_ , is_bounding_box=lowercase_ )
                    for box, original_size in zip(lowercase_ , lowercase_ )
                ]
            SCREAMING_SNAKE_CASE_ = np.array(lowercase_ )
        if input_boxes is not None:
            if return_tensors == "pt":
                SCREAMING_SNAKE_CASE_ = torch.from_numpy(lowercase_ )
                # boxes batch size of 1 by default
                SCREAMING_SNAKE_CASE_ = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
            elif return_tensors == "tf":
                SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor(lowercase_ )
                # boxes batch size of 1 by default
                SCREAMING_SNAKE_CASE_ = tf.expand_dims(lowercase_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
            encoding_image_processor.update({'''input_boxes''': input_boxes} )
        if input_points is not None:
            if return_tensors == "pt":
                SCREAMING_SNAKE_CASE_ = torch.from_numpy(lowercase_ )
                # point batch size of 1 by default
                SCREAMING_SNAKE_CASE_ = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
            elif return_tensors == "tf":
                SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor(lowercase_ )
                # point batch size of 1 by default
                SCREAMING_SNAKE_CASE_ = tf.expand_dims(lowercase_ , 1 ) if len(input_points.shape ) != 4 else input_points
            encoding_image_processor.update({'''input_points''': input_points} )
        if input_labels is not None:
            if return_tensors == "pt":
                SCREAMING_SNAKE_CASE_ = torch.from_numpy(lowercase_ )
                # point batch size of 1 by default
                SCREAMING_SNAKE_CASE_ = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
            elif return_tensors == "tf":
                SCREAMING_SNAKE_CASE_ = tf.convert_to_tensor(lowercase_ )
                # point batch size of 1 by default
                SCREAMING_SNAKE_CASE_ = tf.expand_dims(lowercase_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
            encoding_image_processor.update({'''input_labels''': input_labels} )
        return encoding_image_processor

    def _lowercase ( self : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] ) -> Optional[int]:
        """Pad every point set (and its labels) to the largest point count
        using the sentinel pad value."""
        SCREAMING_SNAKE_CASE_ = max([point.shape[0] for point in input_points] )
        SCREAMING_SNAKE_CASE_ = []
        for i, point in enumerate(lowercase_ ):
            if point.shape[0] != expected_nb_points:
                SCREAMING_SNAKE_CASE_ = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
                SCREAMING_SNAKE_CASE_ = np.append(input_labels[i] , [self.point_pad_value] )
            processed_input_points.append(lowercase_ )
        SCREAMING_SNAKE_CASE_ = processed_input_points
        return input_points, input_labels

    def _lowercase ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple=False ) -> List[Any]:
        """Scale (x, y) coordinates from the original image size to the
        resized size; a bounding box is treated as two corner points."""
        SCREAMING_SNAKE_CASE_ = original_size
        SCREAMING_SNAKE_CASE_ = self.image_processor._get_preprocess_shape(lowercase_ , longest_edge=lowercase_ )
        SCREAMING_SNAKE_CASE_ = deepcopy(lowercase_ ).astype(lowercase_ )
        if is_bounding_box:
            SCREAMING_SNAKE_CASE_ = coords.reshape(-1 , 2 , 2 )
        SCREAMING_SNAKE_CASE_ = coords[..., 0] * (new_w / old_w)
        SCREAMING_SNAKE_CASE_ = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            SCREAMING_SNAKE_CASE_ = coords.reshape(-1 , 4 )
        return coords

    def _lowercase ( self : List[str] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Optional[Any]=None , ) -> List[str]:
        """Validate raw user input: accept tensors or nested lists, convert to
        lists of numpy arrays, and raise on malformed structures."""
        if input_points is not None:
            if hasattr(lowercase_ , '''numpy''' ): # Checks for TF or Torch tensor
                SCREAMING_SNAKE_CASE_ = input_points.numpy().tolist()
            if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_points[0] , lowercase_ ):
                raise ValueError('''Input points must be a list of list of floating points.''' )
            SCREAMING_SNAKE_CASE_ = [np.array(lowercase_ ) for input_point in input_points]
        else:
            SCREAMING_SNAKE_CASE_ = None
        if input_labels is not None:
            if hasattr(lowercase_ , '''numpy''' ):
                SCREAMING_SNAKE_CASE_ = input_labels.numpy().tolist()
            if not isinstance(lowercase_ , lowercase_ ) or not isinstance(input_labels[0] , lowercase_ ):
                raise ValueError('''Input labels must be a list of list integers.''' )
            SCREAMING_SNAKE_CASE_ = [np.array(lowercase_ ) for label in input_labels]
        else:
            SCREAMING_SNAKE_CASE_ = None
        if input_boxes is not None:
            if hasattr(lowercase_ , '''numpy''' ):
                SCREAMING_SNAKE_CASE_ = input_boxes.numpy().tolist()
            if (
                not isinstance(lowercase_ , lowercase_ )
                or not isinstance(input_boxes[0] , lowercase_ )
                or not isinstance(input_boxes[0][0] , lowercase_ )
            ):
                raise ValueError('''Input boxes must be a list of list of list of floating points.''' )
            SCREAMING_SNAKE_CASE_ = [np.array(lowercase_ ).astype(np.floataa ) for box in input_boxes]
        else:
            SCREAMING_SNAKE_CASE_ = None
        return input_points, input_labels, input_boxes

    @property
    def _lowercase ( self : List[str] ) -> Union[str, Any]:
        """Names of the tensors produced by the wrapped image processor,
        de-duplicated while preserving order."""
        SCREAMING_SNAKE_CASE_ = self.image_processor.model_input_names
        return list(dict.fromkeys(lowercase_ ) )

    def _lowercase ( self : Optional[Any] , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : int ) -> List[str]:
        """Delegate mask post-processing to the wrapped image processor."""
        return self.image_processor.post_process_masks(*lowercase_ , **lowercase_ )
def snake_case(grid):
    """Minimum path sum from top-left to bottom-right of *grid*, moving only
    right or down.

    Mutates *grid* in place into cumulative sums and returns ``grid[-1][-1]``.
    Raises TypeError when the grid (or its first row) is empty.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")
    # Accumulate the first row (only rightward moves are possible there).
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        # Fill the current row in place using the completed row above
        # (inlined: the original called an undefined `fill_row`).
        current_row[0] += row_above[0]
        for cell_n in range(1, len(current_row)):
            current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
        row_above = current_row
    return grid[-1][-1]
def snake_case(current_row, row_above):
    """Accumulate minimum path sums into *current_row* (in place) given the
    already-accumulated *row_above*; returns *current_row*.

    The original declared both parameters as ``__lowercase`` (a SyntaxError);
    names are restored from their uses in the body.
    """
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    # (Trailing "| 670 | 0 |" junk tokens removed from the original line.)
    import doctest

    doctest.testmod()
'''GLPN model configuration.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): `List[Any]` is never imported here; module-level annotations
# are evaluated at runtime, so this line would raise NameError as written.
_A: List[Any] = logging.get_logger(__name__)
# Map of pretrained GLPN checkpoints to their config files on the Hub.
# NOTE(review): rebinding `_A` clobbers the logger above (obfuscation artifact).
_A: Union[str, Any] = {
    'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class UpperCAmelCase(PretrainedConfig):
    """
    Configuration for a GLPN (Global-Local Path Network) depth-estimation
    model: a SegFormer-style hierarchical Transformer encoder plus a
    lightweight decoder.  Defaults follow the upstream GLPN release.

    Restored from an obfuscated original whose base class (`__snake_case`) was
    undefined and whose 18 ``__init__`` parameters were all named ``__A``
    (a SyntaxError); parameter names come from the attribute assignments.
    """

    # Identifier consumed by the auto-config machinery (the original bound
    # this to `_A` with an unimported `Dict` annotation).
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],  # mutable defaults kept to match the upstream signature
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        # Forward unknown arguments to PretrainedConfig (the original passed
        # the undefined name `lowercase_` here).
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
import random


def snake_case(data, pivot):
    """Three-way partition of *data* around *pivot*.

    Returns ``(less, equal, greater)`` lists preserving input order.  The
    obfuscated original declared both parameters as ``__lowercase`` (a
    SyntaxError) and appended to three undefined names.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def snake_case(items, index):
    """Quickselect: return the element that would sit at position *index*
    (0-based) if *items* were sorted, or None for an out-of-range index.

    Uses a random pivot; partitioning is inlined because the original called
    an undefined ``_partition`` and recursed with the wrong arguments.
    """
    import random  # local import keeps this block self-contained

    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = [], [], []
    for element in items:
        if element < pivot:
            smaller.append(element)
        elif element > pivot:
            larger.append(element)
        else:
            equal.append(element)
    count = len(equal)
    m = len(smaller)
    # index falls on the pivot's run of equal elements
    if m <= index < m + count:
        return pivot
    # must be in the smaller partition
    elif m > index:
        return snake_case(smaller, index)
    # must be in the larger partition
    else:
        return snake_case(larger, index - (m + count))
"""simple docstring"""
import argparse
import os
import re
_SCREAMING_SNAKE_CASE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_SCREAMING_SNAKE_CASE = re.compile(R"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
_SCREAMING_SNAKE_CASE = re.compile(R"""\s*\(\s*\"(\S[^\"]+)\"""")
def __UpperCamelCase(fname, overwrite=False):
    """Sort the entries of every ``*_MAPPING(_NAMES) = OrderedDict`` block in
    *fname* by identifier.

    When *overwrite* is True the file is rewritten in place; otherwise returns
    True if the file would change (i.e. it needs sorting).  The obfuscated
    original declared both parameters as ``SCREAMING_SNAKE_CASE`` (a
    SyntaxError) and read the undefined name ``__lowercase`` throughout.
    """
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()
    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            # Entries are indented 8 columns past the mapping's own indent.
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping: copy lines until the first entry.
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Entries either fit on one line or span an indented ( ... ) block.
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1
            # Sort blocks by the quoted identifier that opens each one.
            blocks = sorted(blocks, key=lambda block: _re_identifier.search(block).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1
    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def __UpperCamelCase ( SCREAMING_SNAKE_CASE = False ) -> Union[str, Any]:
    """Check (or fix, when the flag is truthy) the sort order of every auto
    mapping file; raises ValueError listing files that need sorting.

    NOTE(review): this body is broken by obfuscation — `__lowercase`,
    `overwrite`, `fnames` and `sort_auto_mapping` are all undefined names
    (the results of the comprehensions are bound to `__snake_case` instead,
    and the sibling function above shares this function's own name), so the
    intended call targets must be confirmed before running.
    """
    __snake_case = [os.path.join(__lowercase , __lowercase ) for f in os.listdir(__lowercase ) if f.endswith(".py" )]
    __snake_case = [sort_auto_mapping(__lowercase , overwrite=__lowercase ) for fname in fnames]
    if not overwrite and any(__lowercase ):
        __snake_case = [f for f, d in zip(__lowercase , __lowercase ) if d]
        raise ValueError(
            F'''The following files have auto mappings that need sorting: {", ".join(__lowercase )}. Run `make style` to fix'''
            " this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    # Both functions above share the obfuscated name `__UpperCamelCase`; at
    # module run time that name is bound to the LAST definition (sort all auto
    # mappings, taking a single `overwrite` flag), so dispatch through it.
    # The original assigned parser/args to `_SCREAMING_SNAKE_CASE` but read
    # them back under `parser`/`args` — restored here.
    __UpperCamelCase(not args.check_only)
from math import pow, sqrt


def snake_case(*values):
    """Return True when at least one value was given and all are > 0.

    The obfuscated original iterated an undefined name ``values`` while the
    parameter was called ``__lowercase``; the junk-prefixed import line above
    the def has also been restored.
    """
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result
def snake_case(molar_mass_a, molar_mass_b):
    """Graham's-law ratio of effusion rates, rounded to 6 places.

    Assumes rate_a / rate_b = sqrt(M_b / M_a) — TODO confirm the intended
    direction; the obfuscated original divided a molar mass by itself.
    Returns (not raises) a ValueError instance on non-positive input, matching
    the original contract.  Validation is inlined because the original called
    an undefined ``validate``.
    """
    if molar_mass_a > 0.0 and molar_mass_b > 0.0:
        return round(sqrt(molar_mass_b / molar_mass_a), 6)
    return ValueError("Input Error: Molar mass values must greater than 0.")
def snake_case(effusion_rate, molar_mass_a, molar_mass_b):
    """First effusion rate from a known rate and the two molar masses,
    rounded to 6 places.

    Assumes rate_a = rate * sqrt(M_b / M_a) — TODO confirm direction; the
    obfuscated original divided a molar mass by itself.  Returns (not raises)
    a ValueError instance on non-positive input; validation inlined (original
    called an undefined ``validate``).
    """
    if effusion_rate > 0.0 and molar_mass_a > 0.0 and molar_mass_b > 0.0:
        return round(effusion_rate * sqrt(molar_mass_b / molar_mass_a), 6)
    return ValueError(
        "Input Error: Molar mass and effusion rate values must greater than 0.")
def snake_case(effusion_rate, molar_mass_a, molar_mass_b):
    """Second effusion rate from a known rate and the two molar masses,
    rounded to 6 places.

    Assumes rate_b = rate / sqrt(M_b / M_a) — TODO confirm direction; the
    obfuscated original divided a molar mass by itself.  Returns (not raises)
    a ValueError instance on non-positive input; validation inlined (original
    called an undefined ``validate``).
    """
    if effusion_rate > 0.0 and molar_mass_a > 0.0 and molar_mass_b > 0.0:
        return round(effusion_rate / sqrt(molar_mass_b / molar_mass_a), 6)
    return ValueError(
        "Input Error: Molar mass and effusion rate values must greater than 0.")
def snake_case(molar_mass, effusion_rate_a, effusion_rate_b):
    """First molar mass from the other molar mass and the two effusion rates,
    rounded to 6 places: M / (rate_a / rate_b)**2.

    The obfuscated original squared a rate divided by itself — TODO confirm
    which rate belongs in the numerator.  Returns (not raises) a ValueError
    instance on non-positive input; validation inlined (original called an
    undefined ``validate``).
    """
    if molar_mass > 0.0 and effusion_rate_a > 0.0 and effusion_rate_b > 0.0:
        return round(molar_mass / pow(effusion_rate_a / effusion_rate_b, 2), 6)
    return ValueError(
        "Input Error: Molar mass and effusion rate values must greater than 0.")
def snake_case(molar_mass, effusion_rate_a, effusion_rate_b):
    """Second molar mass from the other molar mass and the two effusion
    rates, rounded to 6 places: (rate_a / rate_b)**2 / M.

    The obfuscated original squared a rate divided by itself — TODO confirm
    which rate belongs in the numerator.  Returns (not raises) a ValueError
    instance on non-positive input; validation inlined (original called an
    undefined ``validate``).  Trailing "| 670 | 0 |" junk removed from the
    original's last line.
    """
    if molar_mass > 0.0 and effusion_rate_a > 0.0 and effusion_rate_b > 0.0:
        return round(pow(effusion_rate_a / effusion_rate_b, 2) / molar_mass, 6)
    return ValueError(
        "Input Error: Molar mass and effusion rate values must greater than 0.")
# Lazy-import scaffolding for the CLIP model family: the import structure is
# declared up front and only resolved on attribute access via _LazyModule.
# NOTE(review): every optional-dependency branch below rebinds
# `_lowerCAmelCase`, clobbering the import-structure dict (obfuscation
# artifact) — upstream assigns these lists into `_import_structure[...]` keys.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

_lowerCAmelCase = {
    'configuration_clip': [
        'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'CLIPConfig',
        'CLIPOnnxConfig',
        'CLIPTextConfig',
        'CLIPVisionConfig',
    ],
    'processing_clip': ['CLIPProcessor'],
    'tokenization_clip': ['CLIPTokenizer'],
}

# Optional symbols, only exported when the corresponding backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = ['CLIPTokenizerFast']

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = ['CLIPFeatureExtractor']
    _lowerCAmelCase = ['CLIPImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = [
        'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CLIPModel',
        'CLIPPreTrainedModel',
        'CLIPTextModel',
        'CLIPTextModelWithProjection',
        'CLIPVisionModel',
        'CLIPVisionModelWithProjection',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = [
        'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFCLIPModel',
        'TFCLIPPreTrainedModel',
        'TFCLIPTextModel',
        'TFCLIPVisionModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase = [
        'FlaxCLIPModel',
        'FlaxCLIPPreTrainedModel',
        'FlaxCLIPTextModel',
        'FlaxCLIPTextPreTrainedModel',
        'FlaxCLIPVisionModel',
        'FlaxCLIPVisionPreTrainedModel',
    ]

# Static type checkers see direct imports; at runtime the module is lazy.
if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # NOTE(review): `_import_structure` is undefined here — the dict above was
    # bound to `_lowerCAmelCase` by the obfuscation; confirm before running.
    _lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import warnings

from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor


__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)


class lowercase_(LayoutLMvaImageProcessor):
    """Deprecated alias for the LayoutLMv2 image processor, kept only for
    backward compatibility; emits a FutureWarning on construction.

    Restored from an obfuscated original whose base class (`__snake_case`) was
    undefined, whose warning category was the undefined `lowercase_`, and whose
    ``*``/``**`` parameters shared one name (a SyntaxError).  The bogus
    `: Any` annotation on the logger (Any was never imported) was dropped.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def lowerCamelCase_(expression, starting_point, variable="x", precision=10**-10, multiplicity=1):
    """Find a root of the sympy *expression* via (modified) Newton-Raphson.

    *multiplicity* > 1 accelerates convergence on repeated roots.  Raises
    ZeroDivisionError when the derivative vanishes at the current guess.
    The obfuscated original declared five parameters with the same name (a
    SyntaxError); names are restored from their uses in the body.
    """
    symbol = symbols(variable)
    func = lambdify(symbol, expression)
    diff_function = lambdify(symbol, diff(expression, symbol))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
    # The demo calls below originally referenced an undefined `newton_raphson`;
    # the solver above is named `lowerCamelCase_`.  Trailing junk (a fused
    # `from __future__ import annotations` from the next file) removed.
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {lowerCamelCase_('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {lowerCamelCase_('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{lowerCamelCase_('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{lowerCamelCase_('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {lowerCamelCase_('cos(x)', 0)}")
from typing import TypedDict
class lowercase_(TypedDict):
    """Result of a Burrows-Wheeler transform: the transformed string plus the
    index of the original string among the sorted rotations.

    Field names are recovered from the dict keys used by the transform below;
    the obfuscated original subclassed the undefined `__snake_case` and bound
    both fields to the same name (`_lowerCamelCase = 42`).
    """

    bwt_string: str
    idx_original_string: int
def snake_case(s):
    """Return all cyclic rotations of *s* (including *s* itself).

    Raises TypeError when *s* is not a str.  The obfuscated original checked
    ``isinstance(__lowercase, __lowercase)`` and read an undefined ``s``.
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]
def snake_case(s):
    """Burrows-Wheeler transform of *s*.

    Returns a dict with ``bwt_string`` (last column of the sorted rotation
    matrix) and ``idx_original_string`` (row index of *s* in it).  Rotation
    generation is inlined because the original called an undefined
    ``all_rotations``; the undefined ``BWTTransformDict`` return annotation
    (evaluated at def time) is dropped.
    """
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = [s[i:] + s[:i] for i in range(len(s))]
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response = {
        "bwt_string": "".join(word[-1] for word in rotations),
        "idx_original_string": rotations.index(s),
    }
    return response
def snake_case(bwt_string, idx_original_string):
    """Invert a Burrows-Wheeler transform.

    Repeatedly prepends *bwt_string*'s characters to a sorted rotation table;
    after len(bwt_string) passes row *idx_original_string* is the original
    string.  The obfuscated original declared both parameters as
    ``__lowercase`` (a SyntaxError).
    """
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string).")
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    # NOTE(review): `bwt_transform` / `reverse_bwt` are not defined under
    # those names in this file (the defs above are all named `snake_case`,
    # each shadowing the last) — confirm the intended targets before running.
    # Trailing "| 670 | 0 |" junk removed from the original's last line; the
    # obfuscated locals (`__SCREAMING_SNAKE_CASE`) are restored from usage.
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
'''Fine-tune a multiple-choice model (e.g. on SWAG) with the Hugging Face Trainer.'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
# NOTE(review): `List` is not imported (only Optional/Union are); module-level
# annotations are evaluated, so this line would raise NameError as written.
_UpperCAmelCase : List[str] = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase:
    """
    Arguments pertaining to which model/config/tokenizer to fine-tune.

    Field names are restored from their uses in the main routine
    (``model_args.model_name_or_path`` etc.); the obfuscated original bound
    every field to the same name ``A__`` with defaults set to the undefined
    ``__snake_case``, which made the dataclass unusable.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class UpperCAmelCase:
    """
    Arguments pertaining to the data used for training and evaluation.

    Field names are restored from their uses in the main routine
    (``data_args.train_file`` etc.); the obfuscated original bound every
    field to ``A__`` with undefined defaults, and its extension-check method
    was named ``_lowercase`` so it never ran — it is restored to
    ``__post_init__`` so validation happens on construction.
    """

    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Only csv/json inputs are supported by the loading code in main().
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class UpperCAmelCase:
    """
    Data collator for multiple choice: flattens each example's choice list,
    pads the flat batch with the tokenizer, reshapes back to
    ``(batch_size, num_choices, seq_len)`` and re-attaches the labels.

    Restored from an obfuscated original whose fields were all named ``A__``
    and whose ``__call__`` read the undefined ``lowercase_`` and the
    non-existent dtype ``torch.intaa``, and discarded the reshaped batch
    instead of keying the labels into it.  Annotations are quoted so the
    dataclass does not need the transformers symbols at class-creation time.
    """

    tokenizer: "PreTrainedTokenizerBase"  # must provide ``pad``
    padding: "Union[bool, str, PaddingStrategy]" = True
    max_length: "Optional[int]" = None
    pad_to_multiple_of: "Optional[int]" = None

    def __call__(self, features):
        # Labels may arrive under either key depending on the dataset.
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten back to (batch, choices, seq)
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def snake_case__ ( ) -> Optional[Any]:
    """Main fine-tuning routine: parse arguments, load the dataset and model,
    preprocess into multiple-choice tensors, then train/evaluate with Trainer.

    NOTE(review): the obfuscation collapsed many distinct locals into
    `_UpperCamelCase` and replaced several references with the undefined
    `__lowercase`, so this body cannot run as written; it is documented
    unchanged rather than restyled because the statement order and Trainer
    wiring are intricate.
    """
    _UpperCamelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        _UpperCamelCase : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        _UpperCamelCase : str = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_swag''' ,__lowercase ,__lowercase )
    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    _UpperCamelCase : Optional[Any] = training_args.get_process_log_level()
    logger.setLevel(__lowercase )
    datasets.utils.logging.set_verbosity(__lowercase )
    transformers.utils.logging.set_verbosity(__lowercase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
    logger.info(f'''Training/evaluation parameters {training_args}''' )
    # Detecting last checkpoint.
    _UpperCamelCase : Tuple = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        _UpperCamelCase : Any = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
    # Set seed before initializing model.
    set_seed(training_args.seed )
    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        _UpperCamelCase : List[Any] = {}
        if data_args.train_file is not None:
            _UpperCamelCase : int = data_args.train_file
        if data_args.validation_file is not None:
            _UpperCamelCase : Dict = data_args.validation_file
        _UpperCamelCase : Tuple = data_args.train_file.split('''.''' )[-1]
        _UpperCamelCase : List[Any] = load_dataset(
            __lowercase ,data_files=__lowercase ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
    else:
        # Downloading and loading the swag dataset from the hub.
        _UpperCamelCase : Union[str, Any] = load_dataset(
            '''swag''' ,'''regular''' ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Load pretrained model and tokenizer
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    _UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    _UpperCamelCase : int = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    _UpperCamelCase : str = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,config=__lowercase ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    _UpperCamelCase : Union[str, Any] = [f'''ending{i}''' for i in range(4 )]
    _UpperCamelCase : List[Any] = "sent1"
    _UpperCamelCase : Any = "sent2"
    if data_args.max_seq_length is None:
        _UpperCamelCase : int = tokenizer.model_max_length
        if max_seq_length > 10_24:
            logger.warning(
                '''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
                ''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
                ''' override this default with `--block_size xxx`.''' )
            _UpperCamelCase : str = 10_24
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
                f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
        _UpperCamelCase : Tuple = min(data_args.max_seq_length ,tokenizer.model_max_length )
    # Preprocessing the datasets.
    def preprocess_function(UpperCamelCase ):
        # Each example is repeated once per answer candidate, paired with that
        # candidate's ending, tokenized flat, then regrouped in fours.
        _UpperCamelCase : List[str] = [[context] * 4 for context in examples[context_name]]
        _UpperCamelCase : Dict = examples[question_header_name]
        _UpperCamelCase : List[str] = [
            [f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(__lowercase )
        ]
        # Flatten out
        _UpperCamelCase : List[Any] = list(chain(*__lowercase ) )
        _UpperCamelCase : str = list(chain(*__lowercase ) )
        # Tokenize
        _UpperCamelCase : Any = tokenizer(
            __lowercase ,__lowercase ,truncation=__lowercase ,max_length=__lowercase ,padding='''max_length''' if data_args.pad_to_max_length else False ,)
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 ,len(__lowercase ) ,4 )] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('''--do_train requires a train dataset''' )
        _UpperCamelCase : Any = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            _UpperCamelCase : str = min(len(__lowercase ) ,data_args.max_train_samples )
            _UpperCamelCase : Union[str, Any] = train_dataset.select(range(__lowercase ) )
        with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
            _UpperCamelCase : Optional[Any] = train_dataset.map(
                __lowercase ,batched=__lowercase ,num_proc=data_args.preprocessing_num_workers ,load_from_cache_file=not data_args.overwrite_cache ,)
    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('''--do_eval requires a validation dataset''' )
        _UpperCamelCase : Optional[int] = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            _UpperCamelCase : Any = min(len(__lowercase ) ,data_args.max_eval_samples )
            _UpperCamelCase : Optional[Any] = eval_dataset.select(range(__lowercase ) )
        with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
            _UpperCamelCase : List[Any] = eval_dataset.map(
                __lowercase ,batched=__lowercase ,num_proc=data_args.preprocessing_num_workers ,load_from_cache_file=not data_args.overwrite_cache ,)
    # Data collator
    _UpperCamelCase : int = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=__lowercase ,pad_to_multiple_of=8 if training_args.fpaa else None )
    )
    # Metric
    def compute_metrics(UpperCamelCase ):
        # Accuracy over argmax predictions.
        _UpperCamelCase : Dict = eval_predictions
        _UpperCamelCase : List[str] = np.argmax(__lowercase ,axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
    # Initialize our Trainer
    _UpperCamelCase : Dict = Trainer(
        model=__lowercase ,args=__lowercase ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,tokenizer=__lowercase ,data_collator=__lowercase ,compute_metrics=__lowercase ,)
    # Training
    if training_args.do_train:
        _UpperCamelCase : Tuple = None
        if training_args.resume_from_checkpoint is not None:
            _UpperCamelCase : Union[str, Any] = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            _UpperCamelCase : Union[str, Any] = last_checkpoint
        _UpperCamelCase : Tuple = trainer.train(resume_from_checkpoint=__lowercase )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        _UpperCamelCase : List[str] = train_result.metrics
        _UpperCamelCase : str = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowercase )
        )
        _UpperCamelCase : Tuple = min(__lowercase ,len(__lowercase ) )
        trainer.log_metrics('''train''' ,__lowercase )
        trainer.save_metrics('''train''' ,__lowercase )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        _UpperCamelCase : Any = trainer.evaluate()
        _UpperCamelCase : Optional[int] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowercase )
        _UpperCamelCase : Tuple = min(__lowercase ,len(__lowercase ) )
        trainer.log_metrics('''eval''' ,__lowercase )
        trainer.save_metrics('''eval''' ,__lowercase )
    _UpperCamelCase : List[str] = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "multiple-choice",
        "dataset_tags": "swag",
        "dataset_args": "regular",
        "dataset": "SWAG",
        "language": "en",
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**__lowercase )
    else:
        trainer.create_model_card(**__lowercase )
def snake_case__ ( UpperCamelCase ) -> Optional[int]:
    # TPU (xla_spawn) entry point; the index argument is required by the
    # spawner but unused here.  NOTE(review): `main` is not defined under that
    # name in this file (the main routine above is also `snake_case__`, which
    # this def shadows) — confirm the intended target before running.
    main()
if __name__ == "__main__":
    # NOTE(review): `main` is undefined here — the main routine above is named
    # `snake_case__` (and is itself shadowed by the TPU wrapper); confirm the
    # intended call target before running.
    main()
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
# (junk "| 683 |" tokens removed from the original first line).
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
"""Convert original MaskFormer checkpoints (Swin backbone) to the transformers format."""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for the conversion script.
A : int = logging.get_logger(__name__)
def _lowerCamelCase ( _UpperCamelCase ):
    """Build a MaskFormerConfig (Swin-tiny backbone) with dataset-specific labels for *model_name*.

    NOTE(review): restored from an obfuscated body in which every local shared the
    name ``__lowerCAmelCase`` and the final dict comprehension referenced undefined
    names (``__lowercase``, ``idalabel``). The id2label mapping is now also attached
    to the returned config instead of being computed and discarded.
    """
    model_name = _UpperCamelCase
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset" ), "r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def _lowerCamelCase ( _UpperCamelCase ):
    """Return the list of (original_key, hf_key) rename pairs for a MaskFormer checkpoint.

    NOTE(review): the obfuscated body referenced undefined names ``config`` and
    ``rename_keys`` (the parameter is ``_UpperCamelCase`` and the list was assigned
    to a throwaway local); both are restored here. The key strings themselves are
    kept verbatim.
    """
    config = _UpperCamelCase
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
    rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
    rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
    rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
        if i < 3:
            rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
            rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
            rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
        rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
        rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
    # FPN
    rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
    rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
    rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
    for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
        rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
        rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
        rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
    rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
    rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
    # Transformer decoder
    for idx in range(config.decoder_config.decoder_layers ):
        # self-attention out projection
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
        # cross-attention out projection
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
        # MLP 1
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", f"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", f"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
        # MLP 2
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", f"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", f"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
        # layernorm 1 (self-attention layernorm)
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
        # layernorm 2 (cross-attention layernorm)
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
        # layernorm 3 (final layernorm)
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
    # heads on top
    rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
    rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
    rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
    rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
    rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
    for i in range(3 ):
        rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight") )
        rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias") )
    # fmt: on
    return rename_keys
def _lowerCamelCase ( dct , old , new ):
    """Move the value stored under ``old`` to ``new`` in ``dct``, in place.

    NOTE(review): the obfuscated signature declared three parameters all named
    ``_UpperCamelCase`` (a SyntaxError in Python) and the body referenced the
    undefined names ``dct``/``val``; distinct names are restored here.
    """
    val = dct.pop(old )
    dct[new] = val
def _lowerCamelCase ( state_dict , backbone_config ):
    """Split each fused Swin ``attn.qkv`` matrix/bias into separate HF query/key/value
    entries in ``state_dict`` (mutated in place).

    NOTE(review): the obfuscated version had duplicate parameter names (SyntaxError)
    and assigned every q/k/v slice to a throwaway local instead of writing it back
    under its HF key; the target key names follow the ``attention.self.{query,key,value}``
    scheme used by the rename table above.
    """
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim :, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
def _lowerCamelCase ( state_dict , config ):
    """Split the decoder's fused ``in_proj`` self/cross-attention matrices into
    separate HF q/k/v projections in ``state_dict`` (mutated in place).

    NOTE(review): restored from an obfuscated body with duplicate parameter names
    and discarded slice assignments; also normalizes the bias slice to use the
    local ``hidden_size`` (the original sliced with ``config.hidden_size``).
    """
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
    # fmt: on
def _lowerCamelCase ( ):
    """Download and return the standard COCO val2017 test image used for verification.

    NOTE(review): the obfuscated body assigned the URL and the image to the same
    local, passed the undefined ``__lowercase`` as ``stream=``, and returned the
    undefined name ``im``; all restored here.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # stream=True lets PIL read directly from the HTTP response body
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def _lowerCamelCase ( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ):
    """Convert an original MaskFormer pickle checkpoint to the HF format, verify the
    outputs on a test image, and optionally save/push the result.

    NOTE(review): restored from an obfuscated body with four identically-named
    parameters (a SyntaxError) and dangling locals. The helper calls below
    (``get_maskformer_config``, ``create_rename_keys``, ``rename_key``,
    ``read_in_swin_q_k_v``, ``read_in_decoder_q_k_v``, ``prepare_img``) use the
    intended upstream names — confirm the helper defs carry those names in this file.
    """
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , "rb" ) as f:
        data = pickle.load(f )
    state_dict = data["model"]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
    # only the final backbone layernorm is expected to be freshly initialized
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, f"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors="pt" )
    outputs = model(**inputs )
    print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing model and image processor to the hub..." )
        model.push_to_hub(f"nielsr/{model_name}" )
        image_processor.push_to_hub(f"nielsr/{model_name}" )
if __name__ == "__main__":
    # CLI wrapper around the conversion routine.
    # NOTE(review): the obfuscated version assigned the parser to `A` but used the
    # undefined names `parser`/`args`, and passed a one-element tuple as `help=`;
    # all restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    # NOTE(review): `convert_maskformer_checkpoint` is the intended upstream name of
    # the conversion function defined above — confirm the def carries that name here.
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowercase_ :
    """Builds tiny TFLED configs/inputs and checks decoder past-key-values behaviour.

    NOTE(review): the obfuscation collapsed three distinct class attributes
    (presumably ``config_cls``, ``config_updates`` and ``hidden_act`` upstream)
    onto the single name ``_lowerCamelCase`` — only the last assignment survives.
    The method signatures below declare several parameters all named ``lowercase_``
    (duplicate parameter names are a SyntaxError in Python), and the bodies
    reference the original argument names, which are no longer defined. Code is
    left byte-identical pending a restore from the upstream test file.
    """
    _lowerCamelCase = LEDConfig
    _lowerCamelCase = {}
    _lowerCamelCase = 'gelu'
    # NOTE(review): duplicate `lowercase_` parameters — SyntaxError; body references
    # the original names (`parent`, `batch_size`, ...), which are undefined here.
    def __init__( self , lowercase_ , lowercase_=13 , lowercase_=7 , lowercase_=True , lowercase_=False , lowercase_=99 , lowercase_=32 , lowercase_=2 , lowercase_=4 , lowercase_=37 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=20 , lowercase_=2 , lowercase_=1 , lowercase_=0 , lowercase_=4 , ):
        _snake_case : Optional[int] = parent
        _snake_case : str = batch_size
        _snake_case : int = seq_length
        _snake_case : Dict = is_training
        _snake_case : Optional[Any] = use_labels
        _snake_case : Tuple = vocab_size
        _snake_case : str = hidden_size
        _snake_case : int = num_hidden_layers
        _snake_case : Union[str, Any] = num_attention_heads
        _snake_case : int = intermediate_size
        _snake_case : List[str] = hidden_dropout_prob
        _snake_case : List[Any] = attention_probs_dropout_prob
        _snake_case : int = max_position_embeddings
        _snake_case : Union[str, Any] = eos_token_id
        _snake_case : str = pad_token_id
        _snake_case : Any = bos_token_id
        _snake_case : str = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        _snake_case : List[Any] = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        _snake_case : List[str] = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    # Builds a config plus a full inputs dict (with global attention mask) for the common tests.
    def UpperCamelCase ( self ):
        _snake_case : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        _snake_case : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        _snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
        _snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _snake_case : List[str] = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
        _snake_case : Optional[Any] = prepare_led_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
        _snake_case : int = tf.concat(
            [tf.zeros_like(lowercase_ )[:, :-1], tf.ones_like(lowercase_ )[:, -1:]] , axis=-1 , )
        _snake_case : List[Any] = global_attention_mask
        return config, inputs_dict
    # Verifies that generation with cached past key values matches a full forward pass.
    def UpperCamelCase ( self , lowercase_ , lowercase_ ):
        _snake_case : Dict = TFLEDModel(config=lowercase_ ).get_decoder()
        _snake_case : Optional[Any] = inputs_dict["input_ids"]
        _snake_case : Optional[int] = input_ids[:1, :]
        _snake_case : int = inputs_dict["attention_mask"][:1, :]
        _snake_case : int = 1
        # first forward pass
        _snake_case : str = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
        _snake_case ,_snake_case : Optional[int] = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        _snake_case : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _snake_case : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        _snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
        _snake_case : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        _snake_case : str = model(lowercase_ , attention_mask=lowercase_ )[0]
        _snake_case : List[str] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        _snake_case : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        _snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx]
        _snake_case : List[str] = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
def snake_case (config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , ) -> dict:
    """Fill in any missing LED model inputs (masks) and return the full inputs dict.

    NOTE(review): the obfuscated signature declared seven parameters all named
    ``__lowercase`` (a SyntaxError) while the body already used the original
    names, and used the nonexistent dtype ``tf.inta`` — restored to ``tf.int8``
    (one obfuscated letter per digit); confirm against the upstream LED test.
    """
    if attention_mask is None:
        # mask out padding positions
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        # the first decoder token (decoder_start_token) is always attended to
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class lowercase_ ( __snake_case , __snake_case , unittest.TestCase ):
    """Common-suite tests for TFLED models, plus an attention-outputs shape check.

    NOTE(review): the two ``__snake_case`` base classes are obfuscation residue
    (presumably ``TFModelTesterMixin`` and ``PipelineTesterMixin`` upstream); the
    class attributes were all collapsed onto ``_lowerCamelCase`` so only the last
    assignment survives; every method is named ``UpperCamelCase`` so each def
    shadows the previous one; and method bodies reference undefined original
    names (``inputs_dict``, ``config``, ``model`` ...). Code is left byte-identical
    pending a restore from the upstream test file.
    """
    _lowerCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    _lowerCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    _lowerCamelCase = (
        {
            'conversational': TFLEDForConditionalGeneration,
            'feature-extraction': TFLEDModel,
            'summarization': TFLEDForConditionalGeneration,
            'text2text-generation': TFLEDForConditionalGeneration,
            'translation': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _lowerCamelCase = True
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    # setUp equivalent: build the model tester and config tester.
    def UpperCamelCase ( self ):
        _snake_case : Optional[Any] = TFLEDModelTester(self )
        _snake_case : List[Any] = ConfigTester(self , config_class=lowercase_ )
    def UpperCamelCase ( self ):
        self.config_tester.run_common_tests()
    def UpperCamelCase ( self ):
        _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
    # Checks encoder/decoder attention output shapes, incl. global attention slots.
    def UpperCamelCase ( self ):
        _snake_case ,_snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        _snake_case : Tuple = tf.zeros_like(inputs_dict["attention_mask"] )
        _snake_case : Tuple = 2
        _snake_case : Dict = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
        _snake_case : Tuple = True
        _snake_case : Union[str, Any] = self.model_tester.seq_length
        _snake_case : Union[str, Any] = self.model_tester.encoder_seq_length
        def check_decoder_attentions_output(lowercase_ ):
            _snake_case : Optional[Any] = outputs.decoder_attentions
            self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
        def check_encoder_attentions_output(lowercase_ ):
            _snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
            _snake_case : Optional[int] = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
        for model_class in self.all_model_classes:
            _snake_case : Union[str, Any] = True
            _snake_case : Dict = False
            _snake_case : Any = False
            _snake_case : Any = model_class(lowercase_ )
            _snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
            _snake_case : Tuple = len(lowercase_ )
            self.assertEqual(config.output_hidden_states , lowercase_ )
            check_encoder_attentions_output(lowercase_ )
            if self.is_encoder_decoder:
                _snake_case : int = model_class(lowercase_ )
                _snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
                self.assertEqual(config.output_hidden_states , lowercase_ )
                check_decoder_attentions_output(lowercase_ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            _snake_case : List[Any] = True
            _snake_case : Any = model_class(lowercase_ )
            _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
            self.assertEqual(config.output_hidden_states , lowercase_ )
            check_encoder_attentions_output(lowercase_ )
            # Check attention is always last and order is fine
            _snake_case : Optional[int] = True
            _snake_case : Optional[int] = True
            _snake_case : List[Any] = model_class(lowercase_ )
            _snake_case : Union[str, Any] = model(self._prepare_for_class(lowercase_ , lowercase_ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase_ ) )
            self.assertEqual(model.config.output_hidden_states , lowercase_ )
            check_encoder_attentions_output(lowercase_ )
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
    def UpperCamelCase ( self ):
        pass
    def UpperCamelCase ( self ):
        # TODO: Head-masking not yet implement
        pass
def snake_case (__lowercase ) -> Optional[Any]:
    """Build an int32 ``tf.Tensor`` of token ids from a nested Python list.

    NOTE(review): the obfuscated source used the nonexistent dtype ``tf.intaa``;
    the upstream LED integration test uses ``tf.int32`` here — confirm upstream.
    """
    return tf.constant(__lowercase , dtype=tf.int32 )
# Default absolute tolerance used by the integration checks below.
__SCREAMING_SNAKE_CASE : List[Any] = 1E-4
@slow
@require_tf
class lowercase_ ( unittest.TestCase ):
    """Slow integration checks against the pretrained allenai/led-base-16384 weights.

    NOTE(review): restored from an obfuscated body whose locals were all collapsed
    onto ``_snake_case`` (leaving ``model``/``input_ids``/... undefined) and whose
    last line carried trailing extraction junk ``| 670 | 0 |`` (a SyntaxError).
    Both methods were — and remain — named ``UpperCamelCase``, so the second def
    shadows the first; kept to preserve the public interface, confirm upstream.
    Relies on the module-level ``_long_tensor`` and ``prepare_led_inputs_dict``
    helpers.
    """
    # Hidden-state check on the bare LED encoder-decoder (``.led`` sub-model).
    def UpperCamelCase ( self ):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1_024, 768)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-3 )
    # LM-head logits check on the full conditional-generation model.
    def UpperCamelCase ( self ):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        decoder_input_ids = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        inputs_dict = prepare_led_inputs_dict(model.config , input_ids , decoder_input_ids )
        output = model(**inputs_dict )[0]
        expected_shape = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-3 , rtol=1e-3 )
import itertools
import string
from collections.abc import Generator, Iterable
def __SCREAMING_SNAKE_CASE ( seq , size ) -> Generator[tuple[str, ...], None, None]:
    """Yield successive ``size``-length tuples from ``seq`` (final chunk may be shorter).

    NOTE(review): the obfuscated signature declared both parameters as
    ``lowercase_`` (a SyntaxError) and the body referenced the undefined
    names ``chunk`` and the iterator; restored here.
    """
    it = iter(seq )
    while True:
        chunk = tuple(itertools.islice(it , size ) )
        if not chunk:
            return
        yield chunk
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
    """Normalize Playfair plaintext: uppercase letters only, an ``X`` inserted
    between doubled letters, and an ``X`` appended if the length is odd.

    NOTE(review): restored from an obfuscated body that referenced the undefined
    names ``dirty``/``clean`` (the parameter is ``lowercase_``).
    """
    dirty = "".join(c.upper() for c in lowercase_ if c in string.ascii_letters )
    clean = ""
    if len(dirty ) < 2:
        return dirty
    for i in range(len(dirty ) - 1 ):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            # Playfair digraphs may not repeat a letter; pad with X
            clean += "X"
    clean += dirty[-1]
    if len(clean ) & 1:
        clean += "X"
    return clean
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[str]:
    """Build the 25-letter Playfair key table (J merged into I) for key ``lowercase_``.

    NOTE(review): restored from an obfuscated body that referenced the undefined
    names ``alphabet``/``table``/``key``/``char``.
    """
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"  # no J: merged with I
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in lowercase_.upper():
        if char not in table and char in alphabet:
            table.append(char )
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char )
    return table
def __SCREAMING_SNAKE_CASE ( plaintext , key ) -> str:
    """Encode ``plaintext`` with the Playfair cipher under ``key``.

    NOTE(review): restored from an obfuscated body whose two parameters were both
    named ``lowercase_`` (a SyntaxError) and whose locals (``table``, row/column
    indices, loop chars) were undefined or duplicated. Relies on the module-level
    ``generate_table``/``prepare_input``/``chunker`` helpers — confirm those defs
    carry these names in this file.
    """
    table = generate_table(key )
    plaintext = prepare_input(plaintext )
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(plaintext , 2 ):
        rowa, cola = divmod(table.index(chara ) , 5 )
        rowb, colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            # same row: take the letter to the right of each (wrapping)
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            # same column: take the letter below each (wrapping)
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle: keep rows, swap columns
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext
def __SCREAMING_SNAKE_CASE ( ciphertext , key ) -> str:
    """Decode Playfair ``ciphertext`` under ``key`` (inverse of the encode above).

    NOTE(review): restored from an obfuscated body whose two parameters were both
    named ``lowercase_`` (a SyntaxError) and whose locals were undefined or
    duplicated. Relies on the module-level ``generate_table``/``chunker`` helpers —
    confirm those defs carry these names in this file.
    """
    table = generate_table(key )
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext , 2 ):
        rowa, cola = divmod(table.index(chara ) , 5 )
        rowb, colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            # same row: take the letter to the left of each (wrapping)
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            # same column: take the letter above each (wrapping)
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle: keep rows, swap columns
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the sentencepiece fixture model used by the slow/unit tests below.
__SCREAMING_SNAKE_CASE : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
# NOTE(review): identifiers in this file appear machine-mangled (`_snake_case`,
# `lowercase_`); several locals are assigned under one name and read under
# another (e.g. `tokenizer`, `vocab_keys`). Comments describe apparent intent
# only — confirm against the upstream Reformer tokenizer test before relying
# on any behavior here.
@require_sentencepiece
@require_tokenizers
class lowercase_ ( __snake_case , unittest.TestCase ):
    """Tokenizer test-suite for ReformerTokenizer / ReformerTokenizerFast,
    driven by the shared TokenizerTesterMixin-style base (`__snake_case`)."""

    # Mixin configuration: slow/fast tokenizer classes and feature flags.
    _lowerCamelCase = ReformerTokenizer
    _lowerCamelCase = ReformerTokenizerFast
    _lowerCamelCase = True
    _lowerCamelCase = False
    _lowerCamelCase = True

    def UpperCamelCase ( self ):
        # setUp: build a tokenizer from the fixture model and save it to tmpdir.
        super().setUp()
        _snake_case : Union[str, Any] = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
        # NOTE(review): `tokenizer` is not the name assigned above — mangling artifact.
        tokenizer.save_pretrained(self.tmpdirname )

    def UpperCamelCase ( self ):
        # Round-trip a single token/id pair through the conversion helpers.
        _snake_case : int = "<s>"
        _snake_case : int = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )

    def UpperCamelCase ( self ):
        # Spot-check the vocab ordering and size (1000 entries expected).
        _snake_case : str = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(lowercase_ ) , 1_000 )

    def UpperCamelCase ( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )

    def UpperCamelCase ( self ):
        # Slow vs. fast tokenizer parity on tokenize/encode.
        if not self.test_rust_tokenizer:
            return
        _snake_case : Tuple = self.get_tokenizer()
        _snake_case : List[str] = self.get_rust_tokenizer()
        _snake_case : int = "I was born in 92000, and this is falsé."
        _snake_case : Tuple = tokenizer.tokenize(lowercase_ )
        _snake_case : List[Any] = rust_tokenizer.tokenize(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
        _snake_case : str = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
        _snake_case : Tuple = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
        _snake_case : Dict = self.get_rust_tokenizer()
        _snake_case : List[Any] = tokenizer.encode(lowercase_ )
        _snake_case : str = rust_tokenizer.encode(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )

    def UpperCamelCase ( self , lowercase_=15 ):
        # Reformer has no pad token, so padding to max_length must raise
        # for every encode variant, on both simple and pair inputs.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                _snake_case : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                # Simple input
                _snake_case : List[str] = "This is a simple input"
                _snake_case : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
                _snake_case : Union[str, Any] = ("This is a simple input", "This is a pair")
                _snake_case : int = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
                # Simple input
                self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
                # Simple input
                self.assertRaises(
                    lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )
                # Pair input
                self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="max_length" )
                # Pair input
                self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" )
                # Pair input
                self.assertRaises(
                    lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="max_length" , )

    def UpperCamelCase ( self ):
        # Intentionally disabled base-class test.
        pass

    def UpperCamelCase ( self ):
        # Full tokenization test: tokens, token->id, and id->token (with <unk>
        # substituted for out-of-vocab pieces on the way back).
        _snake_case : Dict = ReformerTokenizer(lowercase_ , keep_accents=lowercase_ )
        _snake_case : Tuple = tokenizer.tokenize("This is a test" )
        self.assertListEqual(lowercase_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(lowercase_ ) , [285, 46, 10, 170, 382] , )
        _snake_case : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            lowercase_ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        _snake_case : Any = tokenizer.convert_tokens_to_ids(lowercase_ )
        self.assertListEqual(
            lowercase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        _snake_case : List[Any] = tokenizer.convert_ids_to_tokens(lowercase_ )
        self.assertListEqual(
            lowercase_ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )

    @cached_property
    def UpperCamelCase ( self ):
        # Pretrained reference tokenizer used by the @slow integration tests.
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )

    @slow
    def UpperCamelCase ( self ):
        # Pin the exact encoding of a short string against the pretrained model.
        _snake_case : int = "Hello World!"
        _snake_case : Dict = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )

    @slow
    def UpperCamelCase ( self ):
        # Long input with punctuation and made-up words that should map to <unk> (id 0).
        _snake_case : Optional[int] = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        _snake_case : Dict = [
            108,
            265,
            24,
            111,
            4,
            258,
            156,
            35,
            28,
            275,
            3,
            259,
            297,
            260,
            84,
            4,
            35,
            110,
            44,
            8,
            259,
            91,
            268,
            21,
            11,
            209,
            274,
            109,
            266,
            277,
            117,
            86,
            93,
            315,
            258,
            278,
            258,
            277,
            258,
            0,
            258,
            288,
            258,
            319,
            258,
            0,
            258,
            0,
            258,
            0,
            258,
            0,
            258,
            287,
            258,
            315,
            258,
            289,
            258,
            278,
            99,
            269,
            266,
            262,
            8,
            259,
            241,
            4,
            217,
            230,
            268,
            266,
            55,
            168,
            106,
            75,
            193,
            266,
            223,
            27,
            49,
            26,
            282,
            25,
            264,
            299,
            19,
            26,
            0,
            258,
            277,
            117,
            86,
            93,
            176,
            183,
            270,
            11,
            262,
            42,
            61,
            265,
        ]
        self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )

    @require_torch
    @slow
    def UpperCamelCase ( self ):
        # Smoke test: encoded output must be consumable by a ReformerModel
        # whose axial position embeddings are resized to the batch shape.
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        _snake_case : str = list(self.big_tokenizer.get_vocab().keys() )[:10]
        _snake_case : str = " ".join(lowercase_ )
        _snake_case : Tuple = self.big_tokenizer.encode_plus(lowercase_ , return_tensors="pt" )
        _snake_case : Tuple = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
        _snake_case : int = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        _snake_case : Union[str, Any] = encoded_sequence["input_ids"].shape
        _snake_case : List[str] = ReformerModel(lowercase_ )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**lowercase_ )
            model(**lowercase_ )

    @slow
    def UpperCamelCase ( self ):
        # Full integration test via the shared tokenizer_integration_test_util,
        # pinned to a specific model revision so the expected ids stay stable.
        # fmt: off
        _snake_case : Union[str, Any] = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        _snake_case : Tuple = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase_ , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowercase_ , sequences=lowercase_ , )
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __UpperCAmelCase( __snake_case ):
    """Processor that bundles a CLIP image processor with an XLM-Roberta
    tokenizer into a single object (AltCLIP-style).

    Args:
        image_processor: the ``CLIPImageProcessor`` handling images.
        tokenizer: the ``XLMRobertaTokenizer``/``XLMRobertaTokenizerFast``
            handling text.
    """

    # Bug fix: the original assigned all three class attributes to one mangled
    # name, breaking the ProcessorMixin save/load contract. These are the
    # attribute names ProcessorMixin expects.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """Accepts the deprecated ``feature_extractor`` kwarg as a fallback
        for ``image_processor``; both components are required."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Tokenize ``text`` and/or preprocess ``images``.

        Returns a ``BatchEncoding`` with ``input_ids``/``attention_mask`` for
        text and ``pixel_values`` for images. Raises ValueError when both are
        None.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )

        if text is not None and images is not None:
            # Bug fix: the original dropped the pixel values into a dead local
            # instead of attaching them to the returned encoding.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """Combined, order-preserving de-duplicated input names of both parts."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 218 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowercase_ ( unittest.TestCase ):
    """Unit tests for CLIPSegProcessor: save/load round-trips, kwarg
    overrides, and parity with its tokenizer/image-processor components.

    NOTE(review): identifiers are machine-mangled (`_snake_case`,
    `lowercase_`); several locals are assigned under one name and read under
    another. Comments describe apparent intent only.
    """

    def UpperCamelCase ( self ):
        # setUp: write a tiny BPE vocab/merges and an image-processor config
        # into a temp dir so processors can be constructed from disk.
        _snake_case : Any = tempfile.mkdtemp()
        # fmt: off
        _snake_case : Optional[Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        _snake_case : Dict = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        _snake_case : Dict = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        _snake_case : Optional[int] = {"unk_token": "<unk>"}
        _snake_case : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        _snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowercase_ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(lowercase_ ) )
        _snake_case : Any = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        _snake_case : Optional[Any] = os.path.join(self.tmpdirname , lowercase_ )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(lowercase_ , lowercase_ )

    def UpperCamelCase ( self , **lowercase_ ):
        # Helper: slow tokenizer loaded from the temp dir.
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )

    def UpperCamelCase ( self , **lowercase_ ):
        # Helper: fast tokenizer loaded from the temp dir.
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )

    def UpperCamelCase ( self , **lowercase_ ):
        # Helper: image processor loaded from the temp dir.
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ )

    def UpperCamelCase ( self ):
        # tearDown: remove the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )

    def UpperCamelCase ( self ):
        # Helper: one random 30x400 RGB PIL image as model input.
        _snake_case : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        _snake_case : Union[str, Any] = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def UpperCamelCase ( self ):
        # Save/load round-trip for processors built with slow and fast tokenizers.
        _snake_case : Tuple = self.get_tokenizer()
        _snake_case : Any = self.get_rust_tokenizer()
        _snake_case : Optional[Any] = self.get_image_processor()
        _snake_case : Any = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        processor_slow.save_pretrained(self.tmpdirname )
        _snake_case : Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ )
        _snake_case : List[Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        processor_fast.save_pretrained(self.tmpdirname )
        _snake_case : Optional[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , lowercase_ )
        self.assertIsInstance(processor_fast.tokenizer , lowercase_ )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , lowercase_ )
        self.assertIsInstance(processor_fast.image_processor , lowercase_ )

    def UpperCamelCase ( self ):
        # from_pretrained must honor extra kwargs (special tokens, image kwargs).
        _snake_case : List[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        _snake_case : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        _snake_case : Optional[Any] = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 )
        _snake_case : Tuple = CLIPSegProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , lowercase_ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , lowercase_ )

    def UpperCamelCase ( self ):
        # Processor(images=...) must match running the image processor directly.
        _snake_case : Union[str, Any] = self.get_image_processor()
        _snake_case : Any = self.get_tokenizer()
        _snake_case : int = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        _snake_case : Optional[int] = self.prepare_image_inputs()
        _snake_case : Optional[Any] = image_processor(lowercase_ , return_tensors="np" )
        _snake_case : str = processor(images=lowercase_ , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def UpperCamelCase ( self ):
        # Processor(text=...) must match running the tokenizer directly.
        _snake_case : Optional[Any] = self.get_image_processor()
        _snake_case : Any = self.get_tokenizer()
        _snake_case : Dict = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        _snake_case : List[str] = "lower newer"
        _snake_case : int = processor(text=lowercase_ )
        _snake_case : str = tokenizer(lowercase_ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def UpperCamelCase ( self ):
        # Text + images returns the union of keys; no input at all raises.
        _snake_case : List[Any] = self.get_image_processor()
        _snake_case : int = self.get_tokenizer()
        _snake_case : Tuple = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        _snake_case : List[Any] = "lower newer"
        _snake_case : int = self.prepare_image_inputs()
        _snake_case : Dict = processor(text=lowercase_ , images=lowercase_ )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(lowercase_ ):
            processor()

    def UpperCamelCase ( self ):
        # CLIPSeg-specific: images plus a visual prompt produce both pixel keys.
        _snake_case : Dict = self.get_image_processor()
        _snake_case : List[str] = self.get_tokenizer()
        _snake_case : Union[str, Any] = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        _snake_case : Optional[int] = self.prepare_image_inputs()
        _snake_case : Dict = self.prepare_image_inputs()
        _snake_case : List[Any] = processor(images=lowercase_ , visual_prompt=lowercase_ )
        self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(lowercase_ ):
            processor()

    def UpperCamelCase ( self ):
        # Processor.batch_decode must defer to the tokenizer's batch_decode.
        _snake_case : Dict = self.get_image_processor()
        _snake_case : List[Any] = self.get_tokenizer()
        _snake_case : str = CLIPSegProcessor(tokenizer=lowercase_ , image_processor=lowercase_ )
        _snake_case : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        _snake_case : Any = processor.batch_decode(lowercase_ )
        _snake_case : Any = tokenizer.batch_decode(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
'''simple docstring'''
# Lazy-import scaffolding for the LayoutLMv2 model family: the public names
# are registered in `_import_structure` and only resolved on first access via
# `_LazyModule`, with tokenizers/vision/torch-dependent entries added only
# when those optional dependencies are installed.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Base structure: config, processor and slow tokenizer need no optional deps.
lowerCamelCase_ = {
    'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
    'processing_layoutlmv2': ['LayoutLMv2Processor'],
    'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}

# Fast tokenizer: only when the `tokenizers` library is present.
# NOTE(review): each branch below rebinds `lowerCamelCase_` rather than
# updating the dict above — mangling artifact; the upstream file extends
# `_import_structure` instead. Confirm before relying on this module.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ = ['LayoutLMv2TokenizerFast']

# Vision entries: only when PIL/vision deps are present.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ = ['LayoutLMv2FeatureExtractor']
    lowerCamelCase_ = ['LayoutLMv2ImageProcessor']

# Model classes: only when torch is present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ = [
        'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv2ForQuestionAnswering',
        'LayoutLMv2ForSequenceClassification',
        'LayoutLMv2ForTokenClassification',
        'LayoutLMv2Layer',
        'LayoutLMv2Model',
        'LayoutLMv2PreTrainedModel',
    ]

# For static type checkers, import the real symbols eagerly.
if TYPE_CHECKING:
    from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaLayer,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )

# At runtime, replace this module with a lazy proxy.
else:
    import sys

    lowerCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 418 | from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) # pylint: disable=invalid-name
def snake_case (__lowercase ) -> Any:
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(__lowercase ):
return ext
raise Exception(
F"""Unable to determine file format from file extension {path}. """
F"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )
def snake_case (__lowercase ) -> Any:
    """CLI factory: build a :class:`RunCommand` from the parsed argparse
    namespace ``__lowercase``.

    Bug fix: the original body read an undefined name ``args`` instead of
    its own parameter.
    """
    nlp = pipeline(
        task=__lowercase.task , model=__lowercase.model if __lowercase.model else None , config=__lowercase.config , tokenizer=__lowercase.tokenizer , device=__lowercase.device , )
    # NOTE(review): `try_infer_format_from_ext` is not defined under that name
    # in this file (the helper above was mangled to `snake_case`) — confirm the
    # intended helper name against the upstream transformers CLI module.
    data_format = try_infer_format_from_ext(__lowercase.input ) if __lowercase.format == "infer" else __lowercase.format
    reader = PipelineDataFormat.from_str(
        format=data_format , output_path=__lowercase.output , input_path=__lowercase.input , column=__lowercase.column if __lowercase.column else nlp.default_input_names , overwrite=__lowercase.overwrite , )
    return RunCommand(nlp , reader )
class lowercase_ ( __snake_case ):
    """`transformers-cli run` subcommand: feed every record from a
    PipelineDataFormat reader through a pipeline and save the outputs.

    NOTE(review): locals in `run` are machine-mangled (`_snake_case`), so
    names like `nlp`/`outputs` are read without a visible binding — comments
    describe apparent intent only.
    """

    def __init__( self , lowercase_ , lowercase_ ):
        # Store the pipeline and the input/output reader.
        _snake_case : str = nlp
        _snake_case : str = reader

    @staticmethod
    def UpperCamelCase ( lowercase_ ):
        # Register the `run` subparser and its CLI options on the given parser.
        _snake_case : Dict = parser.add_parser("run" , help="Run a pipeline through the CLI" )
        run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
        run_parser.add_argument("--input" , type=lowercase_ , help="Path to the file to use for inference" )
        run_parser.add_argument("--output" , type=lowercase_ , help="Path to the file that will be used post to write results." )
        run_parser.add_argument("--model" , type=lowercase_ , help="Name or path to the model to instantiate." )
        run_parser.add_argument("--config" , type=lowercase_ , help="Name or path to the model's config to instantiate." )
        run_parser.add_argument(
            "--tokenizer" , type=lowercase_ , help="Name of the tokenizer to use. (default: same as the model name)" )
        run_parser.add_argument(
            "--column" , type=lowercase_ , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
        run_parser.add_argument(
            "--format" , type=lowercase_ , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
        run_parser.add_argument(
            "--device" , type=lowercase_ , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
        run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
        run_parser.set_defaults(func=lowercase_ )

    def UpperCamelCase ( self ):
        # run: iterate reader records, apply the pipeline, collect outputs,
        # then save (binary or plain depending on the pipeline).
        _snake_case ,_snake_case : Tuple = self._nlp, []
        for entry in self._reader:
            _snake_case : Optional[Any] = nlp(**lowercase_ ) if self._reader.is_multi_columns else nlp(lowercase_ )
            if isinstance(lowercase_ , lowercase_ ):
                outputs.append(lowercase_ )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            _snake_case : str = self._reader.save_binary(lowercase_ )
            logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
        else:
            self._reader.save(lowercase_ )
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# NOTE(review): the path is assigned to `__A` but inserted as `git_repo_path`
# (undefined here) — mangling artifact; upstream uses one name for both.
__A = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __a ( lowerCAmelCase_ : Union[str, Any] ) -> Tuple:
    """pytest_addoption hook: register the shared transformers CLI options on
    ``lowerCAmelCase_`` (the pytest option parser).

    Bug fix: the original forwarded the undefined name ``__lowercase``
    instead of the function's own parameter.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(lowerCAmelCase_ )
def __a ( lowerCAmelCase_ : int ) -> Optional[Any]:
    """pytest_terminal_summary hook: emit the transformers report files when
    the ``--make-reports`` option was passed.

    ``lowerCAmelCase_`` is the pytest terminal reporter. Bug fix: the
    original read the undefined names ``terminalreporter`` and
    ``make_reports`` instead of the parameter / the assigned local.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = lowerCAmelCase_.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(lowerCAmelCase_ , id=make_reports )
| 593 | import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
    """Multi-ControlNet wrapper: holds several ControlNet models in an
    ``nn.ModuleList`` and sums their residuals during the forward pass.

    NOTE(review): parameter and local names are machine-mangled (duplicated
    `lowercase_` parameters, `_snake_case` locals) — comments describe
    apparent intent only; confirm against the upstream MultiControlNetModel.
    """

    def __init__( self , lowercase_ ):
        # Wrap the given ControlNets so they are registered as submodules.
        super().__init__()
        _snake_case : List[str] = nn.ModuleList(lowercase_ )

    def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = True , ):
        # forward: run each controlnet on its (image, scale) pair and
        # accumulate the down-block and mid-block residuals across nets.
        for i, (image, scale, controlnet) in enumerate(zip(lowercase_ , lowercase_ , self.nets ) ):
            _snake_case ,_snake_case : Optional[int] = controlnet(
                lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
            # merge samples
            if i == 0:
                _snake_case ,_snake_case : Tuple = down_samples, mid_sample
            else:
                _snake_case : Tuple = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(lowercase_ , lowercase_ )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def UpperCamelCase ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , ):
        # save_pretrained: write each controlnet to `dir`, `dir_1`, `dir_2`, ...
        _snake_case : Tuple = 0
        _snake_case : Dict = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                lowercase_ , is_main_process=lowercase_ , save_function=lowercase_ , safe_serialization=lowercase_ , variant=lowercase_ , )
            idx += 1
            _snake_case : int = model_path_to_save + f"""_{idx}"""

    @classmethod
    def UpperCamelCase ( cls , lowercase_ , **lowercase_ ):
        # from_pretrained: load `dir`, `dir_1`, `dir_2`, ... until no more
        # suffixed directories exist, then wrap them in this class.
        _snake_case : List[str] = 0
        _snake_case : Optional[Any] = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        _snake_case : Optional[Any] = pretrained_model_path
        while os.path.isdir(lowercase_ ):
            _snake_case : int = ControlNetModel.from_pretrained(lowercase_ , **lowercase_ )
            controlnets.append(lowercase_ )
            idx += 1
            _snake_case : str = pretrained_model_path + f"""_{idx}"""
        logger.info(f"""{len(lowercase_ )} controlnets loaded from {pretrained_model_path}.""" )
        if len(lowercase_ ) == 0:
            raise ValueError(
                f"""No ControlNets found under {os.path.dirname(lowercase_ )}. Expected at least {pretrained_model_path + '_0'}.""" )
        return cls(lowercase_ )
# Lazy-import scaffolding for the LayoutLMv3 model family (config, processor,
# tokenizers, PyTorch and TensorFlow models, vision components). Optional
# entries are only registered when their dependency is installed, and the
# module is replaced by a `_LazyModule` proxy at runtime.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Base structure: no optional dependencies required.
A_ = {
    'configuration_layoutlmv3': [
        'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'LayoutLMv3Config',
        'LayoutLMv3OnnxConfig',
    ],
    'processing_layoutlmv3': ['LayoutLMv3Processor'],
    'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}

# NOTE(review): each branch below rebinds `A_` rather than extending the dict
# above — mangling artifact; upstream updates `_import_structure` in place.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ = ['LayoutLMv3TokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ = [
        'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv3ForQuestionAnswering',
        'LayoutLMv3ForSequenceClassification',
        'LayoutLMv3ForTokenClassification',
        'LayoutLMv3Model',
        'LayoutLMv3PreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ = [
        'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFLayoutLMv3ForQuestionAnswering',
        'TFLayoutLMv3ForSequenceClassification',
        'TFLayoutLMv3ForTokenClassification',
        'TFLayoutLMv3Model',
        'TFLayoutLMv3PreTrainedModel',
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ = ['LayoutLMv3FeatureExtractor']
    A_ = ['LayoutLMv3ImageProcessor']

# For static type checkers, import everything eagerly.
if TYPE_CHECKING:
    from .configuration_layoutlmva import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMvaConfig,
        LayoutLMvaOnnxConfig,
    )
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmva import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
            TFLayoutLMvaModel,
            TFLayoutLMvaPreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
        from .image_processing_layoutlmva import LayoutLMvaImageProcessor

# At runtime, replace this module with a lazy proxy.
else:
    import sys

    A_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 393 | import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( __snake_case ):
    """Processor that bundles a CLIP image processor with an XLM-Roberta
    tokenizer into a single object.

    Args:
        image_processor: the ``CLIPImageProcessor`` handling images.
        tokenizer: the ``XLMRobertaTokenizer``/``XLMRobertaTokenizerFast``
            handling text.
    """

    # Bug fix: the original assigned all three class attributes to one mangled
    # name, breaking the ProcessorMixin save/load contract. These are the
    # attribute names ProcessorMixin expects.
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """Accepts the deprecated ``feature_extractor`` kwarg as a fallback
        for ``image_processor``; both components are required."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Tokenize ``text`` and/or preprocess ``images``.

        Returns a ``BatchEncoding`` with ``input_ids``/``attention_mask`` for
        text and ``pixel_values`` for images. Raises ValueError when both are
        None.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )

        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )

        if text is not None and images is not None:
            # Bug fix: the original dropped the pixel values into a dead local
            # instead of attaching them to the returned encoding.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """Combined, order-preserving de-duplicated input names of both parts."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class UpperCAmelCase ( __snake_case , unittest.TestCase ):
    """Test suite for the (m)BARThez sentencepiece tokenizer (slow + fast).

    NOTE(review): this file appears machine-mangled — every method shares the
    name ``__lowerCamelCase`` (later definitions clobber earlier ones), results
    are bound to a throwaway ``__UpperCAmelCase`` local and then read back via
    names those assignments no longer define (e.g. ``tokenizer``), and the
    undefined name ``lowercase_`` is passed where flag values belong.  Code is
    left byte-identical; comments describe the apparent intent only.
    """
    # TokenizerTesterMixin configuration: classes under test and feature flags
    # (presumably tokenizer_class / rust_tokenizer_class / test flags — TODO confirm).
    _A : List[str] = BarthezTokenizer
    _A : Union[str, Any] = BarthezTokenizerFast
    _A : Optional[int] = True
    _A : Any = True
    def __lowerCamelCase ( self ):
        # setUp: download the pretrained fast tokenizer and save it into the
        # mixin's temp dir (both formats) so later tests can reload it locally.
        super().setUp()
        __UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowercase_ )
        __UpperCAmelCase = tokenizer
    def __lowerCamelCase ( self ):
        # Intended check: the "<pad>" token converts to id 1 and back.
        __UpperCAmelCase = "<pad>"
        __UpperCAmelCase = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
    def __lowerCamelCase ( self ):
        # Intended check: first/last vocabulary entries and vocabulary size.
        __UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(lowercase_ ) , 101_122 )
    def __lowerCamelCase ( self ):
        # Vocabulary size reported by the tokenizer itself.
        self.assertEqual(self.get_tokenizer().vocab_size , 101_122 )
    @require_torch
    def __lowerCamelCase ( self ):
        # Intended check: batch-encode two sentences to PyTorch tensors and
        # verify tensor shapes and the ids of the first sequence.
        __UpperCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        __UpperCAmelCase = [0, 57, 3_018, 70_307, 91, 2]
        __UpperCAmelCase = self.tokenizer(
            lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , truncation=lowercase_ , return_tensors='pt' )
        self.assertIsInstance(lowercase_ , lowercase_ )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        __UpperCAmelCase = batch.input_ids.tolist()[0]
        self.assertListEqual(lowercase_ , lowercase_ )
    def __lowerCamelCase ( self ):
        # Intended check: slow vs. fast tokenizer agree on tokenization and
        # encoding of an accented sentence.
        if not self.test_rust_tokenizer:
            return
        __UpperCAmelCase = self.get_tokenizer()
        __UpperCAmelCase = self.get_rust_tokenizer()
        __UpperCAmelCase = "I was born in 92000, and this is falsé."
        __UpperCAmelCase = tokenizer.tokenize(lowercase_ )
        __UpperCAmelCase = rust_tokenizer.tokenize(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
        __UpperCAmelCase = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
        __UpperCAmelCase = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
        __UpperCAmelCase = self.get_rust_tokenizer()
        __UpperCAmelCase = tokenizer.encode(lowercase_ )
        __UpperCAmelCase = rust_tokenizer.encode(lowercase_ )
        self.assertListEqual(lowercase_ , lowercase_ )
    @slow
    def __lowerCamelCase ( self ):
        # Integration test against a pinned revision: French sentences must
        # produce the hard-coded expected encoding below.
        # fmt: off
        __UpperCAmelCase = {"input_ids": [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        __UpperCAmelCase = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase_ , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=lowercase_ , )
from __future__ import annotations


def snake_case (voltage , current , resistance ) -> dict[str, float]:
    """Apply Ohm's law: exactly one of (voltage, current, resistance) must be
    0 (the unknown); return the missing quantity as a one-entry dict.

    Raises ValueError when zero or several arguments are 0, or when the
    resistance is negative.  (The original mangled signature repeated one
    parameter name — a SyntaxError — and carried dataset junk on its first
    line; parameter names restored to those the body references.)
    """
    # Exactly one unknown, marked by a literal 0.
    if (voltage, current, resistance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if resistance < 0:
        raise ValueError("Resistance cannot be negative" )
    if voltage == 0:
        return {"voltage": float(current * resistance )}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        # Unreachable given the count() guard above; kept as a safety net.
        raise ValueError("Exactly one argument must be 0" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
# Module-level logger (transformers logging wrapper).
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# Canonical tokenizer-constant names restored: the original (mangled) source
# assigned all four mappings to the same name, clobbering each other, while
# the tokenizer class in this module references these canonical names.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

# Download locations of the vocabulary file for each public checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}

# Maximum model input length (positional embedding size) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'YituTech/conv-bert-base': 512,
    'YituTech/conv-bert-medium-small': 512,
    'YituTech/conv-bert-small': 512,
}

# Per-checkpoint tokenizer initialisation overrides.
PRETRAINED_INIT_CONFIGURATION = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __magic_name__ ( __snake_case ):
    """Fast (Rust-backed) ConvBERT tokenizer.

    NOTE(review): machine-mangled source — the three methods below all share
    the name ``lowerCAmelCase`` (later definitions clobber earlier ones) and
    several ``def`` signatures repeat the same parameter name, which is a
    SyntaxError in Python.  The bodies also reference ``token_ids_a`` for what
    is presumably a second sequence — TODO confirm against the upstream
    ConvBertTokenizerFast.  Code left byte-identical.
    """
    # PreTrainedTokenizerFast class-level configuration (vocab file names,
    # pretrained maps, sizes, slow-tokenizer class).
    _SCREAMING_SNAKE_CASE : Any = VOCAB_FILES_NAMES
    _SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
    _SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_INIT_CONFIGURATION
    _SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _SCREAMING_SNAKE_CASE : List[str] = ConvBertTokenizer
    def __init__( self : Optional[int] , snake_case_ : List[Any]=None , snake_case_ : List[str]=None , snake_case_ : List[Any]=True , snake_case_ : List[str]="[UNK]" , snake_case_ : Tuple="[SEP]" , snake_case_ : Optional[int]="[PAD]" , snake_case_ : str="[CLS]" , snake_case_ : Optional[Any]="[MASK]" , snake_case_ : Tuple=True , snake_case_ : str=None , **snake_case_ : List[Any] , ):
        # Forward construction to the fast-tokenizer base class, then make the
        # backend normalizer consistent with the requested casing/stripping
        # options (rebuilds it only when a setting differs).
        super().__init__(
            lowercase_ , tokenizer_file=lowercase_ , do_lower_case=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , tokenize_chinese_chars=lowercase_ , strip_accents=lowercase_ , **lowercase_ , )
        __snake_case = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , lowercase_ ) != do_lower_case
            or normalizer_state.get("strip_accents" , lowercase_ ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , lowercase_ ) != tokenize_chinese_chars
        ):
            # Re-instantiate the normalizer class with the updated settings.
            __snake_case = getattr(lowercase_ , normalizer_state.pop("type" ) )
            __snake_case = do_lower_case
            __snake_case = strip_accents
            __snake_case = tokenize_chinese_chars
            __snake_case = normalizer_class(**lowercase_ )
        __snake_case = do_lower_case
    def lowerCAmelCase ( self : Any , snake_case_ : int , snake_case_ : Any=None ):
        # Intended: build [CLS] A [SEP] (B [SEP]) input ids with special tokens.
        __snake_case = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output
    def lowerCAmelCase ( self : Dict , snake_case_ : Any , snake_case_ : Dict = None ):
        # Intended: token-type ids — zeros over the first sequence (plus
        # special tokens), ones over the optional second sequence.
        __snake_case = [self.sep_token_id]
        __snake_case = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def lowerCAmelCase ( self : List[str] , snake_case_ : int , snake_case_ : Dict = None ):
        # Intended: save the backend tokenizer's vocabulary files to disk.
        __snake_case = self._tokenizer.model.save(lowercase_ , name=lowercase_ )
        return tuple(lowercase_ )
# Dataset-separator junk ("| 163 |") removed from the first import line;
# imports regrouped stdlib / third-party / local.
import functools
import gc
import inspect

import torch

from .imports import is_npu_available, is_xpu_available
def snake_case (*objects ) -> list:
    """Drop the caller's references to ``objects``, collect garbage, and empty
    the active accelerator cache (XPU, NPU, or CUDA).

    Returns the list of now-``None`` slots so callers can rebind, e.g.
    ``a, b = snake_case(a, b)``.
    """
    # ``objects`` arrives as a tuple; the mangled original compared it against
    # itself (isinstance(objects, objects)) — the intended check is for list.
    if not isinstance(objects , list ):
        objects = list(objects )
    for i in range(len(objects ) ):
        # Bug fix: the mangled source assigned None to a throwaway local,
        # leaving every reference alive.
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def snake_case (exception ) -> bool:
    """Return True when ``exception`` is an out-of-memory style error that
    warrants retrying with a smaller batch size.

    Only single-argument ``RuntimeError``s whose message matches a known
    CUDA / cuDNN / CPU-allocator OOM pattern qualify.
    """
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    # The mangled original called isinstance(exception, exception); the
    # intended type check is RuntimeError — the type torch raises on OOM.
    if isinstance(exception , RuntimeError ) and len(exception.args ) == 1:
        return any(err in exception.args[0] for err in _statements )
    return False
def snake_case (function = None , starting_batch_size = 128 ):
    """Decorator that retries ``function`` with a halved batch size whenever it
    raises an out-of-memory error, starting from ``starting_batch_size``.

    ``function`` must take the batch size as its FIRST argument; the decorator
    supplies it.  Usable bare (``@snake_case``) or with arguments
    (``@snake_case(starting_batch_size=N)``).  Raises RuntimeError once the
    batch size reaches zero.  (The mangled original repeated one parameter
    name — a SyntaxError — and referenced names its assignments never bound.)
    """
    if function is None:
        return functools.partial(snake_case , starting_batch_size=starting_batch_size )
    batch_size = starting_batch_size

    def decorator(*args , **kwargs ):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function ).parameters.keys() )
        # Guard against user error: the wrapped function's first parameter is
        # the batch size supplied by this decorator, not by the caller.
        if len(params ) < (len(args ) + 1):
            arg_str = ", ".join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
            raise TypeError(
                F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
                F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero." )
            try:
                return function(batch_size , *args , **kwargs )
            except Exception as e:
                # Only OOM-style failures trigger a retry; anything else propagates.
                # NOTE(review): should_reduce_batch_size is expected to be
                # defined earlier in this module (mangled away here).
                if should_reduce_batch_size(e ):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise
    return decorator
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def _lowerCAmelCase ( scheduler , num_steps=1_0 ):
    """Step ``scheduler`` ``num_steps`` times, recording the first param
    group's learning rate before each step.

    Returns the list of recorded learning rates.  (The mangled original
    repeated one parameter name — a SyntaxError — and referenced names its
    assignments never bound; restored to the names the body uses.)
    """
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def _lowerCAmelCase ( scheduler , num_steps=1_0 ):
    """Record learning rates while stepping ``scheduler`` ``num_steps`` times,
    additionally saving and reloading the scheduler state halfway through to
    exercise checkpoint round-tripping.

    Returns the list of recorded learning rates.  (Mangled original: duplicate
    parameter names — a SyntaxError — and throwaway locals; restored.)
    """
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , """schedule.bin""" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    """Convergence tests for the AdamW and Adafactor optimizers on a tiny
    least-squares problem.

    NOTE(review): machine-mangled — the helper and both test methods share the
    name ``_lowerCamelCase`` (later defs clobber earlier ones), the helper's
    ``def`` repeats the parameter name ``a__`` (a SyntaxError), results are
    bound to throwaway ``A_`` locals, and ``lowercase_`` is an undefined name
    used where real arguments belong.  Code left byte-identical.
    """
    def _lowerCamelCase ( self , a__ , a__ , a__ ):
        # Intended: assertListAlmostEqual — element-wise approximate equality.
        self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
        for a, b in zip(lowercase_ , lowercase_ ):
            self.assertAlmostEqual(lowercase_ , lowercase_ , delta=lowercase_ )
    def _lowerCamelCase ( self ):
        # Intended: 100 AdamW steps of MSE descent from w=[0.1,-0.2,-0.1]
        # should converge to the target [0.4, 0.2, -0.5].
        A_ : List[str] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowercase_ )
        A_ : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] )
        A_ : Optional[int] = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        A_ : List[str] = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
        for _ in range(100 ):
            A_ : str = criterion(lowercase_ , lowercase_ )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
    def _lowerCamelCase ( self ):
        # Intended: 1000 Adafactor steps on the same problem.
        A_ : str = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowercase_ )
        A_ : Tuple = torch.tensor([0.4, 0.2, -0.5] )
        A_ : Tuple = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        A_ : List[Any] = Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowercase_ , weight_decay=0.0 , relative_step=lowercase_ , scale_parameter=lowercase_ , warmup_init=lowercase_ , )
        for _ in range(1000 ):
            A_ : Tuple = criterion(lowercase_ , lowercase_ )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
    """Table-driven checks of the transformers learning-rate schedulers:
    expected learning rates over 10 steps, plus a save/reload round-trip.

    NOTE(review): machine-mangled — the three class attributes all bind the
    name ``a`` (clobbering each other; presumably m / optimizer / num_steps),
    the helper's ``def`` repeats parameter name ``a__`` (a SyntaxError), and
    ``lowercase_`` is undefined.  Code left byte-identical.
    """
    # Fixtures, guarded so the module still imports without torch installed.
    a = nn.Linear(50 , 50 ) if is_torch_available() else None
    a = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    a = 10
    def _lowerCamelCase ( self , a__ , a__ , a__ , a__=None ):
        # Intended: assertListAlmostEqual with an optional failure message.
        self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
        for a, b in zip(lowercase_ , lowercase_ ):
            self.assertAlmostEqual(lowercase_ , lowercase_ , delta=lowercase_ , msg=lowercase_ )
    def _lowerCamelCase ( self ):
        A_ : List[Any] = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        A_ : str = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1E-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        # For every scheduler: check the produced LR trace against the table,
        # then check the trace survives a state-dict save/reload.
        for scheduler_func, data in scheds.items():
            A_ : Tuple = data
            A_ : Optional[Any] = scheduler_func(self.optimizer , **lowercase_ )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            A_ : Union[str, Any] = unwrap_schedule(lowercase_ , self.num_steps )
            self.assertListAlmostEqual(
                lowercase_ , lowercase_ , tol=1E-2 , msg=F"""failed for {scheduler_func} in normal scheduler""" , )
            A_ : List[str] = scheduler_func(self.optimizer , **lowercase_ )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(lowercase_ ) # wrap to test picklability of the schedule
            A_ : List[str] = unwrap_and_save_reload_schedule(lowercase_ , self.num_steps )
            self.assertListEqual(lowercase_ , lowercase_ , msg=F"""failed for {scheduler_func} in save and reload""" )
class _UpperCAmelCase :
    """Picklable callable wrapper around a schedule lambda (lambdas themselves
    cannot be pickled).

    NOTE(review): ``wrap_scheduler`` below computes the wrapped lambdas but —
    in this mangled copy — binds them to a throwaway ``A_`` local instead of
    back onto ``scheduler.lr_lambdas``, and ``scheduler`` itself is undefined
    given the mangled parameter name ``a__``.  Code left byte-identical.
    """
    def __init__( self , a__ ):
        # Store the wrapped callable (read back as self.fn in __call__).
        A_ : Tuple = fn
    def __call__( self , *a__ , **a__ ):
        return self.fn(*lowercase_ , **lowercase_ )
    @classmethod
    def _lowerCamelCase ( self , a__ ):
        # Intended: wrap every lr_lambda of a scheduler in this class.
        A_ : str = list(map(self , scheduler.lr_lambdas ) )
# Baconian cipher lookup tables.  The mangled original assigned both maps to
# the same name (clobbering the first) while the functions below reference
# `encode_dict` / `decode_dict`; canonical names restored.  Values unchanged.
encode_dict = {
    'a': 'AAAAA',
    'b': 'AAAAB',
    'c': 'AAABA',
    'd': 'AAABB',
    'e': 'AABAA',
    'f': 'AABAB',
    'g': 'AABBA',
    'h': 'AABBB',
    'i': 'ABAAA',
    'j': 'BBBAA',
    'k': 'ABAAB',
    'l': 'ABABA',
    'm': 'ABABB',
    'n': 'ABBAA',
    'o': 'ABBAB',
    'p': 'ABBBA',
    'q': 'ABBBB',
    'r': 'BAAAA',
    's': 'BAAAB',
    't': 'BAABA',
    'u': 'BAABB',
    'v': 'BBBAB',
    'w': 'BABAA',
    'x': 'BABAB',
    'y': 'BABBA',
    'z': 'BABBB',
    ' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word ) -> str:
    """Baconian-encode ``word`` (letters and spaces only, case-insensitive).

    Raises Exception on any other character.
    """
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces" )
    return encoded


def decode(coded ) -> str:
    """Decode a Baconian ciphertext made of 'A', 'B' and spaces.

    The mangled original looped on ``len(<param>)`` — an infinite loop — and
    accumulated into undefined names; restored to consume each word in
    5-character groups.  Raises Exception on characters outside {A, B, space}.
    """
    if set(coded ) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces" )
    decoded = ""
    for word in coded.split():
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()


# Backward-compatible alias: in the mangled original both functions were named
# `snake_case`, so the module exposed the decoder under that name.
snake_case = decode

if __name__ == "__main__":
    from doctest import testmod

    testmod()
def lowerCamelCase_ ( input_string , key ):
    """Encrypt ``input_string`` with the rail-fence (zig-zag) cipher using
    ``key`` rows; returns the ciphertext read row by row.

    Raises ValueError for key <= 0.  Returns the input unchanged when key == 1
    or the string is no longer than the key.  (Mangled original repeated the
    parameter name — a SyntaxError — and discarded intermediate results;
    restored to the names the body references.)
    """
    temp_grid = [[] for _ in range(key )]
    lowest = key - 1
    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""" )
    if key == 1 or len(input_string ) <= key:
        return input_string
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    grid = ["".join(row ) for row in temp_grid]
    output_string = "".join(grid )
    return output_string
def lowerCamelCase_ ( input_string , key ):
    """Decrypt ``input_string`` that was rail-fence encrypted with ``key`` rows.

    Rebuilds the zig-zag template to learn each row's length, slices the
    ciphertext into rows, then replays the zig-zag to emit characters in
    plaintext order.  Raises ValueError for key <= 0.  (Mangled original
    repeated the parameter name — a SyntaxError — and discarded intermediate
    results; restored to the names the body references.)
    """
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("""Height of grid can't be 0 or negative""" )
    if key == 1:
        return input_string
    temp_grid = [[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append("""*""" )
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    output_string = ""  # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def lowerCamelCase_ ( input_string ):
    """Brute-force a rail-fence ciphertext: try every key from 1 to
    len(input_string) - 1 and return {key: candidate_plaintext}.

    NOTE(review): relies on a module-level ``decrypt`` that this mangled
    module no longer defines under that name.
    """
    results = {}
    for key_guess in range(1 , len(input_string ) ):  # tries every key
        # Bug fix: the mangled source discarded each decrypted candidate
        # instead of storing it under its key.
        results[key_guess] = decrypt(input_string , key_guess )
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()

# Retained from the original line: the next concatenated module's import was
# fused onto this file's last statement by the data mangling.
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase_ ( unittest.TestCase ):
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCamelCase ( self ):
_snake_case ,_snake_case : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
_snake_case : List[Any] = "A painting of a squirrel eating a burger"
_snake_case : Union[str, Any] = jax.device_count()
_snake_case : List[Any] = num_samples * [prompt]
_snake_case : Tuple = sd_pipe.prepare_inputs(lowercase_ )
_snake_case : str = replicate(lowercase_ )
_snake_case : Dict = shard(lowercase_ )
_snake_case : List[Any] = jax.random.PRNGKey(0 )
_snake_case : List[Any] = jax.random.split(lowercase_ , jax.device_count() )
_snake_case : Tuple = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case : str = images[0, 253:256, 253:256, -1]
_snake_case : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case : Optional[Any] = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def UpperCamelCase ( self ):
_snake_case : Optional[Any] = "stabilityai/stable-diffusion-2"
_snake_case ,_snake_case : List[Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase_ , subfolder="scheduler" )
_snake_case ,_snake_case : int = FlaxStableDiffusionPipeline.from_pretrained(
lowercase_ , scheduler=lowercase_ , revision="bf16" , dtype=jnp.bfloataa , )
_snake_case : str = scheduler_params
_snake_case : Dict = "A painting of a squirrel eating a burger"
_snake_case : Dict = jax.device_count()
_snake_case : Optional[int] = num_samples * [prompt]
_snake_case : List[str] = sd_pipe.prepare_inputs(lowercase_ )
_snake_case : Optional[int] = replicate(lowercase_ )
_snake_case : Union[str, Any] = shard(lowercase_ )
_snake_case : List[Any] = jax.random.PRNGKey(0 )
_snake_case : Union[str, Any] = jax.random.split(lowercase_ , jax.device_count() )
_snake_case : str = sd_pipe(lowercase_ , lowercase_ , lowercase_ , num_inference_steps=25 , jit=lowercase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_snake_case : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_snake_case : List[str] = images[0, 253:256, 253:256, -1]
_snake_case : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_snake_case : Dict = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 | 670 | 0 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCAmelCase ( __snake_case ):
    """Processor pairing the OwlViT image processor with a CLIP tokenizer;
    handles text queries, query images and target images in one call.

    NOTE(review): machine-mangled source — ``__init__`` and ``__call__``
    repeat the parameter name ``_snake_case`` (a SyntaxError in Python), and
    results are bound to throwaway ``_UpperCamelCase`` locals then read back
    via names those assignments no longer define (``kwargs``, ``encoding`` …).
    Code left byte-identical; comments describe the apparent intent only.
    """

    A__ : List[Any] = ['image_processor', 'tokenizer']
    A__ : Tuple = 'OwlViTImageProcessor'
    A__ : Optional[Any] = ('CLIPTokenizer', 'CLIPTokenizerFast')
    def __init__( self , _snake_case=None , _snake_case=None , **_snake_case ) -> Dict:
        # Intended: accept the deprecated `feature_extractor` kwarg as an
        # alias for `image_processor`, then validate both components.
        _UpperCamelCase : Optional[int] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , lowercase_ , )
            _UpperCamelCase : str = kwargs.pop('''feature_extractor''' )
        _UpperCamelCase : str = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(lowercase_ , lowercase_ )
    def __call__( self , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case="max_length" , _snake_case="np" , **_snake_case ) -> List[str]:
        # Intended: tokenize text (padding each batch sample to the maximum
        # number of queries), preprocess query/target images, and combine
        # everything into one BatchEncoding.
        if text is None and query_images is None and images is None:
            raise ValueError(
                '''You have to specify at least one text or query image or image. All three cannot be none.''' )
        if text is not None:
            if isinstance(lowercase_ , lowercase_ ) or (isinstance(lowercase_ , lowercase_ ) and not isinstance(text[0] , lowercase_ )):
                _UpperCamelCase : List[Any] = [self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors=lowercase_ , **lowercase_ )]
            elif isinstance(lowercase_ , lowercase_ ) and isinstance(text[0] , lowercase_ ):
                _UpperCamelCase : str = []
                # Maximum number of queries across batch
                _UpperCamelCase : Tuple = max([len(lowercase_ ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(lowercase_ ) != max_num_queries:
                        _UpperCamelCase : Optional[int] = t + [" "] * (max_num_queries - len(lowercase_ ))
                    _UpperCamelCase : Optional[int] = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors=lowercase_ , **lowercase_ )
                    encodings.append(lowercase_ )
            else:
                raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
            # Concatenate the per-sample encodings along the batch axis using
            # the backend matching the requested tensor type.
            if return_tensors == "np":
                _UpperCamelCase : List[Any] = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                _UpperCamelCase : Tuple = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                _UpperCamelCase : Any = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                _UpperCamelCase : List[str] = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                _UpperCamelCase : List[str] = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
                _UpperCamelCase : int = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                _UpperCamelCase : Optional[int] = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                _UpperCamelCase : Optional[Any] = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
            else:
                raise ValueError('''Target return tensor type could not be returned''' )
            _UpperCamelCase : Optional[int] = BatchEncoding()
            _UpperCamelCase : List[str] = input_ids
            _UpperCamelCase : str = attention_mask
        if query_images is not None:
            # Intended: attach query image pixel values under `query_pixel_values`.
            _UpperCamelCase : List[str] = BatchEncoding()
            _UpperCamelCase : Any = self.image_processor(
                lowercase_ , return_tensors=lowercase_ , **lowercase_ ).pixel_values
            _UpperCamelCase : List[Any] = query_pixel_values
        if images is not None:
            _UpperCamelCase : Union[str, Any] = self.image_processor(lowercase_ , return_tensors=lowercase_ , **lowercase_ )
        # Intended: merge pixel_values into the text encoding when both exist.
        if text is not None and images is not None:
            _UpperCamelCase : Union[str, Any] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            _UpperCamelCase : Tuple = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**lowercase_ ) , tensor_type=lowercase_ )
    def _lowercase ( self , *_snake_case , **_snake_case ) -> List[Any]:
        # Forward to the image processor's post_process.
        return self.image_processor.post_process(*lowercase_ , **lowercase_ )
    def _lowercase ( self , *_snake_case , **_snake_case ) -> List[Any]:
        # Forward to the image processor's object-detection post-processing.
        return self.image_processor.post_process_object_detection(*lowercase_ , **lowercase_ )
    def _lowercase ( self , *_snake_case , **_snake_case ) -> Optional[int]:
        # Forward to the image-guided detection post-processing.
        return self.image_processor.post_process_image_guided_detection(*lowercase_ , **lowercase_ )
    def _lowercase ( self , *_snake_case , **_snake_case ) -> List[str]:
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
    def _lowercase ( self , *_snake_case , **_snake_case ) -> Any:
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*lowercase_ , **lowercase_ )
    @property
    def _lowercase ( self ) -> Optional[Any]:
        # Deprecated accessor kept for backward compatibility.
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase_ , )
        return self.image_processor_class
    @property
    def _lowercase ( self ) -> List[str]:
        # Deprecated accessor kept for backward compatibility.
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowercase_ , )
        return self.image_processor
# Dataset-separator junk ("| 683 |") removed from the fused import line.
from manim import *
class lowercase_ ( __snake_case ):
    """Manim scene animating model weights being loaded shard-by-shard from a
    checkpoint through CPU memory (an Accelerate big-model-inference diagram).

    NOTE(review): machine-mangled — results are bound to throwaway
    ``_snake_case`` locals and read back via names those assignments no longer
    define (``cpu_left_col_base`` …), and ``lowercase_`` is undefined where
    directions/colors belong.  Code left byte-identical except for removing
    dataset junk fused onto the final ``self.wait()`` line.
    """

    def UpperCamelCase ( self ):
        # Build the CPU / GPU / Model memory-cell groups and place them.
        _snake_case : Tuple = Rectangle(height=0.5 , width=0.5 )
        _snake_case : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        _snake_case : List[str] = [mem.copy() for i in range(6 )]
        _snake_case : Any = [mem.copy() for i in range(6 )]
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : str = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : int = Text("CPU" , font_size=24 )
        _snake_case : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(lowercase_ )
        _snake_case : int = [mem.copy() for i in range(4 )]
        _snake_case : Dict = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : str = Text("GPU" , font_size=24 )
        _snake_case : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        gpu.move_to([-1, -1, 0] )
        self.add(lowercase_ )
        _snake_case : Any = [mem.copy() for i in range(6 )]
        _snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Dict = Text("Model" , font_size=24 )
        _snake_case : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
        model.move_to([3, -1.0, 0] )
        self.add(lowercase_ )
        # Yellow "empty model" target cells over the CPU columns.
        _snake_case : str = []
        for i, rect in enumerate(lowercase_ ):
            rect.set_stroke(lowercase_ )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            _snake_case : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
            self.add(lowercase_ )
            cpu_targs.append(lowercase_ )
        # Checkpoint group, legend key and step caption.
        _snake_case : List[Any] = [mem.copy() for i in range(6 )]
        _snake_case : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
        _snake_case : Optional[Any] = Text("Loaded Checkpoint" , font_size=24 )
        _snake_case : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        _snake_case : Optional[int] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        _snake_case : Optional[Any] = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(lowercase_ , lowercase_ )
        _snake_case : Union[str, Any] = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        _snake_case : List[Any] = MarkupText(
            f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(lowercase_ ) , Write(lowercase_ ) )
        self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
        # Animate each checkpoint cell filling and moving into a CPU slot.
        _snake_case : int = []
        _snake_case : str = []
        for i, rect in enumerate(lowercase_ ):
            _snake_case : Dict = fill.copy().set_fill(lowercase_ , opacity=0.7 )
            target.move_to(lowercase_ )
            first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
            _snake_case : Dict = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
        self.play(*lowercase_ )
        self.play(*lowercase_ )
        self.wait()
"""simple docstring"""
import requests
def _lowerCamelCase(message_body, slack_url):
    """Post *message_body* as a text message to a Slack incoming-webhook URL.

    Args:
        message_body: plain-text message to send.
        slack_url: webhook URL created at https://my.slack.com/services/new/incoming-webhook/.

    Raises:
        ValueError: if Slack answers with a non-200 status code.
    """
    # The mangled original reused one name for both parameters (a SyntaxError)
    # and referenced unbound `message_body`/`response`; restored here.
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)
if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    # NOTE(review): the original called `send_slack_message`, which is not defined
    # in this file -- the sender above is named `_lowerCamelCase`.
    _lowerCamelCase("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 636 | import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
# Module-level logger shared by the scheduler helpers defined below.
__SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
    """Enumeration of the learning-rate schedule names supported below.

    NOTE(review): the member names were apparently mangled -- every member is
    called `_lowerCamelCase`, so at class-creation time only the last binding
    ('piecewise_constant') survives. The base class `__snake_case` is also not
    defined in this file (presumably `Enum`); confirm against the original
    module (this mirrors a `SchedulerType` enum).
    """
    _lowerCamelCase = 'linear'
    _lowerCamelCase = 'cosine'
    _lowerCamelCase = 'cosine_with_restarts'
    _lowerCamelCase = 'polynomial'
    _lowerCamelCase = 'constant'
    _lowerCamelCase = 'constant_with_warmup'
    _lowerCamelCase = 'piecewise_constant'
def snake_case(optimizer, last_epoch=-1) -> LambdaLR:
    """Create a schedule with a constant learning rate.

    Args:
        optimizer: wrapped ``torch.optim.Optimizer``.
        last_epoch: index of the last epoch when resuming training.

    Returns:
        ``LambdaLR`` whose multiplier is 1 for every step.
    """
    # The mangled original declared two parameters with the same name
    # (a SyntaxError); restored to (optimizer, last_epoch).
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def snake_case(optimizer, num_warmup_steps, last_epoch=-1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup.

    Args:
        optimizer: wrapped ``torch.optim.Optimizer``.
        num_warmup_steps: number of steps over which the LR ramps from 0 to its base value.
        last_epoch: index of the last epoch when resuming training.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            # Linearly ramp the multiplier from 0 to 1 during warmup.
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def snake_case(optimizer, step_rules, last_epoch=-1) -> LambdaLR:
    """Piecewise-constant learning-rate multiplier schedule.

    Args:
        optimizer: wrapped ``torch.optim.Optimizer``.
        step_rules: string such as ``"1:10,20:0.1,0.01"`` -- multiplier 10
            before step 1, 0.1 before step 20, and 0.01 afterwards.
        last_epoch: index of the last epoch when resuming training.
    """
    # The mangled original assigned every intermediate to the same local name;
    # restored the rule-table construction.
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        step_str, value_str = rule_str.split(":")
        steps = int(step_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            # Past the last boundary: use the trailing multiplier.
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def snake_case(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1) -> LambdaLR:
    """Linear warmup to the base LR, then linear decay to 0 at `num_training_steps`.

    Args:
        optimizer: wrapped ``torch.optim.Optimizer``.
        num_warmup_steps: steps over which the LR ramps from 0 to its base value.
        num_training_steps: total steps; the LR reaches 0 here and stays 0.
        last_epoch: index of the last epoch when resuming training.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Decay linearly; clamp at 0 once past num_training_steps.
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def snake_case(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1) -> LambdaLR:
    """Linear warmup followed by a cosine decay toward 0.

    Args:
        optimizer: wrapped ``torch.optim.Optimizer``.
        num_warmup_steps: steps over which the LR ramps from 0 to its base value.
        num_training_steps: total steps of the schedule.
        num_cycles: number of cosine half-waves (0.5 = single decay to 0).
        last_epoch: index of the last epoch when resuming training.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def snake_case(optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1) -> LambdaLR:
    """Linear warmup followed by a cosine schedule with `num_cycles` hard restarts.

    Args:
        optimizer: wrapped ``torch.optim.Optimizer``.
        num_warmup_steps: steps over which the LR ramps from 0 to its base value.
        num_training_steps: total steps of the schedule; 0 afterwards.
        num_cycles: number of hard restarts of the cosine wave.
        last_epoch: index of the last epoch when resuming training.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # The modulo restarts the cosine wave `num_cycles` times over training.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def snake_case(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1) -> LambdaLR:
    """Linear warmup then polynomial decay from the optimizer's base LR to `lr_end`.

    Args:
        optimizer: wrapped ``torch.optim.Optimizer`` (its ``defaults['lr']`` is the initial LR).
        num_warmup_steps: steps over which the LR ramps from 0 to its base value.
        num_training_steps: total steps; the LR stays at `lr_end` afterwards.
        lr_end: final learning rate.
        power: polynomial power (1.0 = linear decay).
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: if `lr_end` is not smaller than the optimizer's initial LR.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        # Fixed a doubled word ("must be be") in the original error message.
        raise ValueError(f"""lr_end ({lr_end}) must be smaller than initial lr ({lr_init})""")

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Maps each schedule enum member to its factory function.
# NOTE(review): neither the `SchedulerType` keys nor the `get_*` factory names
# on the right-hand side are defined under those names in this file -- the
# factories above are all named `snake_case` and the enum class is `lowercase_`.
# Confirm against the original (unmangled) module this file was derived from.
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def snake_case(
    name,
    optimizer,
    step_rules=None,
    num_warmup_steps=None,
    num_training_steps=None,
    num_cycles=1,
    power=1.0,
    last_epoch=-1,
):
    """Unified scheduler factory: look up a schedule by name and build it.

    Args:
        name: schedule name (a `SchedulerType` value or its string).
        optimizer: wrapped ``torch.optim.Optimizer``.
        step_rules: rule string, used only by the piecewise-constant schedule.
        num_warmup_steps: warmup steps, required by all warmup schedules.
        num_training_steps: total steps, required by decaying schedules.
        num_cycles: cosine-with-restarts cycle count.
        power: polynomial-decay power.
        last_epoch: index of the last epoch when resuming training.

    Raises:
        ValueError: if a required argument for the chosen schedule is missing.
    """
    # The mangled original reused one parameter name eight times (a SyntaxError);
    # restored the conventional signature. `SchedulerType` and
    # `TYPE_TO_SCHEDULER_FUNCTION` are expected at module level.
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
def __SCREAMING_SNAKE_CASE(n_element) -> list:
    """Return the first `n_element` Hamming numbers (numbers of the form 2^i * 3^j * 5^k).

    Args:
        n_element: how many Hamming numbers to generate; must be >= 1.

    Raises:
        ValueError: if `n_element` is smaller than 1.
    """
    # The mangled original assigned every local to the same name and read the
    # wrong parameter name; restored the classic three-pointer algorithm.
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError('a should be a positive number')
        raise my_error

    hamming_list = [1]
    (i, j, k) = (0, 0, 0)
    index = 1
    while index < n_element:
        # Advance each pointer past candidates already emitted.
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


# Public alias matching the name the __main__ block below calls.
hamming = __SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # NOTE(review): the original read `input()` into a throwaway name and then
    # called undefined `hamming(int(n))`; restored the binding and the call to
    # the generator defined above.
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = __SCREAMING_SNAKE_CASE(int(n))
    print("""-----------------------------------------------------""")
    print(F'The list with nth numbers is: {hamming_numbers}')
    print("""-----------------------------------------------------""")
| 462 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
# Map from model identifier to the hosted config file for RoCBert.
__SCREAMING_SNAKE_CASE : int = {
    'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class lowercase_ ( __snake_case ):
    """Configuration class for the RoCBert model; stores its hyper-parameters.

    NOTE(review): the base class name `__snake_case` is not defined in this
    file -- the `PretrainedConfig` import above suggests that is the intended
    base; confirm against the original module.
    """

    _lowerCamelCase = 'roc_bert'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        # The mangled original declared every parameter with the same name and
        # assigned every value to one local instead of `self`; restored the
        # instance attributes in the order the original body set them.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
# NOTE(review): these constants were mangled to a single name `a` although the
# functions below read N_POPULATION / N_SELECTED / MUTATION_PROBABILITY;
# restored the names the rest of the file depends on.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def lowercase__(item, main_target) -> tuple[str, float]:
    """Score `item` against `main_target`: one point per position whose
    characters match. Returns ``(item, score)``."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def lowercase__(parent_a, parent_b) -> tuple[str, str]:
    """Single-point crossover: slice both parents at a random index and swap tails."""
    # The mangled original built both children from the first parent; restored
    # the canonical crossover (each child takes the other parent's tail).
    random_slice = random.randint(0, len(parent_a) - 1)
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def lowercase__(child, genes) -> str:
    """With probability MUTATION_PROBABILITY, replace one random gene of `child`."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        # NOTE(review): the mangled original lost the subscript target of this
        # assignment; restored per the canonical algorithm (replace one random
        # position) -- confirm against the unmangled source.
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def lowercase__(parent_a, population_score, genes) -> list[str]:
    """Breed children from `parent_a`, proportional to its fitness score.

    `parent_a` is a ``(string, normalized_score)`` pair; partners are drawn at
    random from the best entries of `population_score`.
    """
    pop = []
    # Generate more children proportionally to the fitness score (capped at 10).
    child_n = int(parent_a[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        # NOTE(review): the mangled original lost this randint bound; 10 follows
        # the canonical algorithm (pick a partner among the top entries).
        parent_b = population_score[random.randint(0, 10)][0]
        child_a, child_b = crossover(parent_a[0], parent_b)
        # Append new string to the population list.
        pop.append(mutate(child_a, genes))
        pop.append(mutate(child_b, genes))
    return pop
def lowercase__(target, genes, debug=True) -> tuple[int, int, str]:
    """Evolve random strings until one matches `target`.

    Returns:
        ``(generation, total_population, best_string)`` for the first exact match.

    Raises:
        ValueError: if N_POPULATION < N_SELECTED, or if `target` contains
            characters that are missing from `genes` (evolution could never
            converge in that case).
    """
    if N_POPULATION < N_SELECTED:
        error_msg = f'''{N_POPULATION} must be bigger than {N_SELECTED}'''
        raise ValueError(error_msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        error_msg = f'''{not_in_genes_list} is not in genes list, evolution cannot converge'''
        raise ValueError(error_msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Score every candidate. (Concurrency would not help here: the fitness
        # function is too cheap for thread overhead to pay off.)
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations, just to know it is working.
        if debug and generation % 10 == 0:
            print(
                f'''\nGeneration: {generation}'''
                f'''\nTotal Population:{total_population}'''
                f'''\nBest score: {population_score[0][1]}'''
                f'''\nBest string: {population_score[0][0]}''' )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
        """nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\"""
    )
    # NOTE(review): the original called undefined `basic(...)` and never bound
    # the names the print below reads; the evolution driver defined last above
    # is named `lowercase__`.
    generation, population, target = lowercase__(target_str, genes_list)
    print(
        f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
    )
| 218 | from cva import destroyAllWindows, imread, imshow, waitKey
def snake_case(img):
    """Invert every pixel of an image array in place and return it.

    Args:
        img: array-like image with ``shape[0]`` rows and ``shape[1]`` columns;
            each pixel holds three channel values (0-255).
    """
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            # The mangled original dropped the subscript target here, so the
            # image was never modified; restored the in-place write.
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
    # NOTE(review): `imread` et al. come from `from cva import ...` above;
    # `cva` looks like a mangled `cv2` (OpenCV) -- confirm before running.
    # read original image
    img = imread('image_data/lena.jpg', 1)
    # convert to its negative (the converter above is named `snake_case`; the
    # original called undefined `convert_to_negative` and discarded the result)
    img = snake_case(img)
    # show result image
    imshow('negative of original image', img)
    waitKey(0)
    destroyAllWindows()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.